// SPDX-License-Identifier: ISC

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"
#include "../trace.h"

#define MT_PSE_PAGE_SIZE	128

static u32
mt7603_ac_queue_mask0(u32 mask)
{
	u32 ret = 0;

	ret |= GENMASK(3, 0) * !!(mask & BIT(0));
	ret |= GENMASK(8, 5) * !!(mask & BIT(1));
	ret |= GENMASK(13, 10) * !!(mask & BIT(2));
	ret |= GENMASK(19, 16) * !!(mask & BIT(3));
	return ret;
}

static void
mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
}

static void
mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}

void mt7603_mac_reset_counters(struct mt7603_dev *dev)
{
	int i;

	for (i = 0; i < 2; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	memset(dev->mphy.aggr_stats, 0, sizeof(dev->mphy.aggr_stats));
}

void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int offset = 3 * dev->coverage_class;
	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	bool is_5ghz = dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ;
	int sifs;
	u32 val;

	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
	mt76_wr(dev, MT_IFS,
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, dev->slottime));

	if (dev->slottime < 20 || is_5ghz)
		val = MT7603_CFEND_RATE_DEFAULT;
	else
		val = MT7603_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

static void
mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}

static u32
mt7603_wtbl1_addr(int idx)
{
	return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
}

static u32
mt7603_wtbl2_addr(int idx)
{
	/* Mapped to WTBL2 */
	return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
}

static u32
mt7603_wtbl3_addr(int idx)
{
	u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL3_SIZE;
}

static u32
mt7603_wtbl4_addr(int idx)
{
	u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL4_SIZE;
}

void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr)
{
	const void *_mac = mac_addr;
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 w0 = 0, w1 = 0;
	int i;

	if (_mac) {
		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
				get_unaligned_le16(_mac + 4));
		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
				get_unaligned_le32(_mac));
	}

	if (vif < 0)
		vif = 0;
	else
		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_set(dev, addr + 0 * 4, w0);
	mt76_set(dev, addr + 1 * 4, w1);
	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	addr = mt7603_wtbl2_addr(idx);
	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	addr = mt7603_wtbl3_addr(idx);
	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	addr = mt7603_wtbl4_addr(idx);
	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

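/*
 * Note on the tx filtering flow used by the helpers below (inferred from
 * the code, not from vendor documentation): mt7603_filter_tx() first sets
 * the WTBL1 W3 SKIP_TX bit so the scheduler passes over the station's
 * queued frames, flushes the per-AC hardware queues, and then walks the
 * DMA frame queues (MT_DMA_FQCR0) to recycle or redirect anything still
 * buffered for that WCID.
 */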
static void
mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 val = mt76_rr(dev, addr + 3 * 4);

	val &= ~MT_WTBL1_W3_SKIP_TX;
	val |= enabled * MT_WTBL1_W3_SKIP_TX;

	mt76_wr(dev, addr + 3 * 4, val);
}

void mt7603_filter_tx(struct mt7603_dev *dev, int mac_idx, int idx, bool abort)
{
	u32 flush_mask;
	int i, port, queue;

	if (abort) {
		port = 3; /* PSE */
		queue = 8; /* free queue */
	} else {
		port = 0; /* HIF */
		queue = 1; /* MCU queue */
	}

	mt7603_wtbl_set_skip_tx(dev, idx, true);

	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
		FIELD_PREP(MT_TX_ABORT_WCID, idx));

	flush_mask = MT_WF_ARB_TX_FLUSH_AC0 |
		     MT_WF_ARB_TX_FLUSH_AC1 |
		     MT_WF_ARB_TX_FLUSH_AC2 |
		     MT_WF_ARB_TX_FLUSH_AC3;
	flush_mask <<= mac_idx;

	mt76_wr(dev, MT_WF_ARB_TX_FLUSH_0, flush_mask);
	mt76_poll(dev, MT_WF_ARB_TX_FLUSH_0, flush_mask, 0, 20000);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, flush_mask);

	mt76_wr(dev, MT_TX_ABORT, 0);

	for (i = 0; i < 4; i++) {
		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

		mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000);
	}

	WARN_ON_ONCE(mt76_rr(dev, MT_DMA_FQCR0) & MT_DMA_FQCR0_BUSY);

	mt7603_wtbl_set_skip_tx(dev, idx, false);
}

void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			  bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);

	if (sta->smps == enabled)
		return;

	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
	sta->smps = enabled;
}

void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled)
{
	int idx = sta->wcid.idx;
	u32 addr;

	spin_lock_bh(&dev->ps_lock);

	if (sta->ps == enabled)
		goto out;

	mt76_wr(dev, MT_PSE_RTA,
		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

	if (enabled)
		mt7603_filter_tx(dev, sta->vif->idx, idx, false);

	addr = mt7603_wtbl1_addr(idx);
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
		 enabled * MT_WTBL1_W3_POWER_SAVE);
	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	sta->ps = enabled;

out:
	spin_unlock_bh(&dev->ps_lock);
}

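/*
 * WTBL2/3/4 entries are stored in PSE pages of MT_PSE_PAGE_SIZE (128)
 * bytes, so a station index is translated into a (frame, entry) pair.
 * Illustrative example, assuming MT_WTBL2_SIZE == 64: two entries fit in
 * one page, so idx 5 lands in frame 2, entry 1; the WTBL3/4 regions are
 * additionally offset by their base frame.
 */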
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
	int wtbl2_frame = idx / wtbl2_frame_size;
	int wtbl2_entry = idx % wtbl2_frame_size;

	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
	int wtbl4_entry = idx % wtbl4_frame_size;

	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
	int i;

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_wr(dev, addr + 0 * 4,
		MT_WTBL1_W0_RX_CHECK_A1 |
		MT_WTBL1_W0_RX_CHECK_A2 |
		MT_WTBL1_W0_RX_VALID);
	mt76_wr(dev, addr + 1 * 4, 0);
	mt76_wr(dev, addr + 2 * 4, 0);

	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	mt76_wr(dev, addr + 3 * 4,
		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
	mt76_wr(dev, addr + 4 * 4,
		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	addr = mt7603_wtbl2_addr(idx);

	/* Clear BA information */
	mt76_wr(dev, addr + (15 * 4), 0);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	for (i = 2; i <= 4; i++)
		mt76_wr(dev, addr + (i * 4), 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	int idx = msta->wcid.idx;
	u8 ampdu_density;
	u32 addr;
	u32 val;

	addr = mt7603_wtbl1_addr(idx);

	ampdu_density = sta->deflink.ht_cap.ampdu_density;
	if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
		ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;

	val = mt76_rr(dev, addr + 2 * 4);
	val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
	val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR,
			  sta->deflink.ht_cap.ampdu_factor) |
	       FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY,
			  sta->deflink.ht_cap.ampdu_density) |
	       MT_WTBL1_W2_TXS_BAF_REPORT;

	if (sta->deflink.ht_cap.cap)
		val |= MT_WTBL1_W2_HT;
	if (sta->deflink.vht_cap.cap)
		val |= MT_WTBL1_W2_VHT;

	mt76_wr(dev, addr + 2 * 4, val);

	addr = mt7603_wtbl2_addr(idx);
	val = mt76_rr(dev, addr + 9 * 4);
	val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
		 MT_WTBL2_W9_SHORT_GI_80);
	if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
		val |= MT_WTBL2_W9_SHORT_GI_20;
	if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
		val |= MT_WTBL2_W9_SHORT_GI_40;
	mt76_wr(dev, addr + 9 * 4, val);
}

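/*
 * Reset the rx BlockAck session for the given station address and TID:
 * the 6-byte MAC address is split across the two BA_CONTROL registers
 * (low 4 bytes, then the high 2 bytes together with the TID and reset bit).
 */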
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
{
	mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
	mt76_wr(dev, MT_BA_CONTROL_1,
		(get_unaligned_le16(addr + 4) |
		 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
		 MT_BA_CONTROL_1_RESET));
}

void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
			    int ba_size)
{
	u32 addr = mt7603_wtbl2_addr(wcid);
	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		       (MT_WTBL2_W15_BA_WIN_SIZE <<
			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
	u32 tid_val;
	int i;

	if (ba_size < 0) {
		/* disable */
		mt76_clear(dev, addr + (15 * 4), tid_mask);
		return;
	}

	for (i = 7; i > 0; i--) {
		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
			break;
	}

	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}

void mt7603_mac_sta_poll(struct mt7603_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	u32 total_airtime = 0;
	u32 airtime[4];
	u32 addr;
	int i;

	rcu_read_lock();

	while (1) {
		bool clear = false;

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&dev->mt76.sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}

		msta = list_first_entry(&dev->mt76.sta_poll_list,
					struct mt7603_sta, wcid.poll_list);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		addr = mt7603_wtbl4_addr(msta->wcid.idx);
		for (i = 0; i < 4; i++) {
			u32 airtime_last = msta->tx_airtime_ac[i];

			msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
			airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
			airtime[i] *= 32;
			total_airtime += airtime[i];

			if (msta->tx_airtime_ac[i] & BIT(22))
				clear = true;
		}

		if (clear) {
			mt7603_wtbl_update(dev, msta->wcid.idx,
					   MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->tx_airtime_ac, 0,
			       sizeof(msta->tx_airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < 4; i++) {
			struct mt76_queue *q = dev->mphy.q_tx[i];
			u8 qidx = q->hw_idx;
			u8 tid = ac_to_tid[i];
			u32 txtime = airtime[qidx];

			if (!txtime)
				continue;

			ieee80211_sta_register_airtime(sta, tid, txtime, 0);
		}
	}

	rcu_read_unlock();

	if (!total_airtime)
		return;

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->mphy.chan_state->cc_tx += total_airtime;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
	struct mt7603_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= MT7603_WTBL_SIZE)
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7603_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

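/*
 * Parse a receive descriptor into struct mt76_rx_status. The fixed 4-word
 * header may be followed by optional descriptor groups announced in rxd0;
 * each present group is decoded or skipped in order. Group 3 (the rx
 * vector) is required here, since it carries the rate and RSSI fields.
 */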
int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
	bool insert_ccmp_hdr = false;
	bool remove_pad;
	int idx;
	int i;

	memset(status, 0, sizeof(*status));

	i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
	i >>= 1;

	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);

	status->band = sband->band;
	if (i < sband->n_channels)
		status->freq = sband->channels[i].center_freq;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd2 & MT_RXD2_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
			      MT_RXD2_NORMAL_NON_AMPDU))) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (dev->rx_ampdu_ts != status->timestamp) {
				if (!++dev->ampdu_ref)
					dev->ampdu_ref++;
			}
			dev->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = dev->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			fallthrough;
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 15)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
				     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);

		status->rate_idx = i;

		status->chains = dev->mphy.antenna_mask;
		status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
					  dev->rssi_offset[0];
		status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
					  dev->rssi_offset[1];

		if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
			status->bw = RATE_INFO_BW_40;

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

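/*
 * Build the 16-bit hardware rate value from a mac80211 tx rate: rate index
 * in MT_TX_RATE_IDX, PHY mode in MT_TX_RATE_MODE, plus MT_TX_RATE_STBC for
 * single-stream STBC. Illustrative example: HT MCS 7 in greenfield mode
 * would be FIELD_PREP(MT_TX_RATE_IDX, 7) |
 * FIELD_PREP(MT_TX_RATE_MODE, MT_PHY_TYPE_HT_GF).
 */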
static u16
mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
		       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mphy.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}

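/*
 * Program the per-station rate table. Two rate sets are kept in software
 * and toggled via BIT(0) of rate_set_tsf; the TSF timestamp captured at
 * programming time later lets mt7603_fill_txs() work out which set a tx
 * status report belongs to, since reports issued within ~1s of an update
 * may still refer to the previous set.
 */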
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates)
{
	struct ieee80211_tx_rate *ref;
	int wcid = sta->wcid.idx;
	u32 addr = mt7603_wtbl2_addr(wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w9 = mt76_rr(dev, addr + 9 * 4);
	bool rateset;
	int i, k;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (k = 0; k < i; k++) {
			if (rates[i].idx != rates[k].idx)
				continue;
			if ((rates[i].flags ^ rates[k].flags) &
			    IEEE80211_TX_RC_40_MHZ_WIDTH)
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
	      MT_WTBL2_W9_SHORT_GI_80;

	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
			 bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w9);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}

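/*
 * Translate a mac80211 key into the on-chip cipher type and key layout.
 * For TKIP, mac80211 stores the Tx MIC key before the Rx MIC key; the
 * hardware apparently expects the opposite order, hence the swap below.
 */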
static enum mt76_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		/* Rx/Tx MIC keys are swapped */
		memcpy(key_data + 16, key->key + 24, 8);
		memcpy(key_data + 24, key->key + 16, 8);
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key)
{
	enum mt76_cipher_type cipher;
	u32 addr = mt7603_wtbl3_addr(wcid);
	u8 key_data[32];
	int key_len = sizeof(key_data);

	cipher = mt7603_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
		addr += key->keyidx * 16;
		key_len = 16;
	}

	mt76_wr_copy(dev, addr, key_data, key_len);

	addr = mt7603_wtbl1_addr(wcid);
	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
	if (key)
		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

	return 0;
}

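/*
 * Fill the 8-word TXWI (tx descriptor) placed in front of the frame:
 * word 0 carries length and target queue, word 1 addressing and header
 * info, word 2 the frame type, word 3 sequence number and retry limit,
 * words 4/5 the PN and the packet ID used for tx status matching, and
 * word 6 an optional fixed rate.
 */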
static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, enum mt76_txq_id qid,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		      int pid, struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_queue *q = dev->mphy.q_tx[qid];
	struct mt7603_vif *mvif;
	int wlan_idx;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	int tx_count = 8;
	u8 frame_type, frame_subtype;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u16 seqno = 0;
	u8 vif_idx = 0;
	u32 val;
	u8 bw;

	if (vif) {
		mvif = (struct mt7603_vif *)vif->drv_priv;
		vif_idx = mvif->idx;
		if (vif_idx && qid >= MT_TXQ_BEACON)
			vif_idx += 0x10;
	}

	if (sta) {
		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (wcid)
		wlan_idx = wcid->idx;
	else
		wlan_idx = MT7603_WTBL_RESERVED;

	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
	txwi[1] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;

	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD5_PID, pid);
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (!(rate->flags & IEEE80211_TX_RC_MCS))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	/* use maximum tx count for beacons and buffered multicast */
	if (qid >= MT_TXQ_BEACON)
		tx_count = 0x1f;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
	      MT_TXD3_SN_VALID;

	if (ieee80211_is_data_qos(hdr->frame_control))
		seqno = le16_to_cpu(hdr->seq_ctrl);
	else if (ieee80211_is_back_req(hdr->frame_control))
		seqno = le16_to_cpu(bar->start_seq_num);
	else
		val &= ~MT_TXD3_SN_VALID;

	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);

	txwi[3] = cpu_to_le32(val);

	if (key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
	}

	txwi[7] = 0;

	return 0;
}

int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid;

	if (!wcid)
		wcid = &dev->global_sta.wcid;

	if (sta) {
		msta = (struct mt7603_sta *)sta->drv_priv;

		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
			mt7603_wtbl_set_ps(dev, msta, false);

		mt76_tx_check_agg_ssn(sta, tx_info->skb);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
				      msta->rates);
		msta->rate_probe = true;
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
			      sta, pid, key);

	return 0;
}

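/*
 * Reconstruct the rate/retry chain for a tx status report. The hardware
 * only reports the last used rate index and the total transmission count,
 * so the per-rate counts are rebuilt by walking the stored rate set at
 * MT7603_RATE_RETRY attempts per entry.
 */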
static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
		struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7603_rate_set *rs;
	int first_idx = 0, last_idx;
	u32 rate_set_tsf;
	u32 final_rate;
	u32 final_rate_flags;
	bool rs_idx;
	bool ack_timeout;
	bool fixed_rate;
	bool probe;
	bool ampdu;
	bool cck = false;
	int count;
	u32 txs;
	int idx;
	int i;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[4]);
	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7603_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(le32_get_bits(txs_data[1], MT_TXS1_F0_TIMESTAMP) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			mt7603_wtbl_set_rates(dev, sta, NULL,
					      sta->rates);
			sta->rate_probe = false;
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7603_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mphy.sband_5g.sband;
		else
			sband = &dev->mphy.sband_2g.sband;
		final_rate &= GENMASK(5, 0);
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= GENMASK(5, 0);
		if (final_rate > 15)
			return false;
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
		       __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
			info->status.rates[0].count = 0;
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

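/*
 * Entry point for a tx status event. If the report can be matched to a
 * queued skb through its packet ID, status is reported on that skb;
 * otherwise fall back to ieee80211_tx_status_noskb() for stations tracked
 * in the WTBL.
 */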
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7603_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u8 wcidx;
	u8 pid;

	pid = le32_get_bits(txs_data[4], MT_TXS4_PID);
	wcidx = le32_get_bits(txs_data[3], MT_TXS3_WCID);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= MT7603_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7603_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (list_empty(&msta->wcid.poll_list)) {
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7603_WTBL_STA || !sta)
		goto out;

	if (mt7603_fill_txs(dev, msta, &info, txs_data)) {
		spin_lock_bh(&dev->mt76.rx_lock);
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
		spin_unlock_bh(&dev->mt76.rx_lock);
	}

out:
	rcu_read_unlock();
}

void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct sk_buff *skb = e->skb;

	if (!e->txwi) {
		dev_kfree_skb_any(skb);
		return;
	}

	dev->tx_hang_check = 0;
	mt76_tx_complete_skb(mdev, e->wcid, skb);
}

static bool
wait_for_wpdma(struct mt7603_dev *dev)
{
	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
			 0, 1000);
}

static void mt7603_pse_reset(struct mt7603_dev *dev)
{
	/* Clear previous reset result */
	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);

	/* Reset PSE */
	mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);

	if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
			    MT_MCU_DEBUG_RESET_PSE_S,
			    MT_MCU_DEBUG_RESET_PSE_S, 500)) {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
	} else {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
	}

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
}

void mt7603_mac_dma_start(struct mt7603_dev *dev)
{
	mt7603_mac_start(dev);

	wait_for_wpdma(dev);
	usleep_range(50, 100);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
		  MT_WPDMA_GLO_CFG_RX_DMA_EN |
		  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
		  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));

	mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
}

void mt7603_mac_start(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
	mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_mac_stop(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
	mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

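/*
 * PSE (packet switch engine) client reset, following the two-phase
 * handshake visible in the register sequence below: request a TX abort
 * (R_E_1) and wait for its ack, then drain the TX FIFO (R_E_2) together
 * with a WPDMA software reset, and finally clear both request bits.
 */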
void mt7603_pse_client_reset(struct mt7603_dev *dev)
{
	u32 addr;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
				   MT_CLIENT_RESET_TX);

	/* Clear previous reset state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2 |
		   MT_CLIENT_RESET_TX_R_E_1_S |
		   MT_CLIENT_RESET_TX_R_E_2_S);

	/* Start PSE client TX abort */
	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
		       MT_CLIENT_RESET_TX_R_E_1_S, 500);

	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

	/* Wait for PSE client to clear TX FIFO */
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
		       MT_CLIENT_RESET_TX_R_E_2_S, 500);

	/* Clear PSE client TX abort state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2);
}

static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
{
	if (!is_mt7628(dev))
		return;

	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
}

static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->mt76.beacon_int;
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mphy.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt7603_beacon_set_timer(dev, -1, 0);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mphy.state);
	mutex_unlock(&dev->mt76.mutex);

	mt76_worker_enable(&dev->mt76.tx_worker);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);
	local_bh_enable();

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mphy);
}

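/*
 * The hang checks below sample internal DMA/PSE state through the WPDMA
 * debug multiplexer: write an index to select a probe point, then read the
 * value back. The index/bit combinations used here mirror the existing
 * checks; they are not otherwise documented.
 */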
static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
{
	u32 val;

	mt76_wr(dev, MT_WPDMA_DEBUG,
		FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
		MT_WPDMA_DEBUG_SEL);

	val = mt76_rr(dev, MT_WPDMA_DEBUG);
	return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
}

static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
{
	if (is_mt7628(dev))
		return mt7603_dma_debug(dev, 9) & BIT(9);

	return mt7603_dma_debug(dev, 2) & BIT(8);
}

static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
{
	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
		return false;

	return mt7603_rx_fifo_busy(dev);
}

static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
{
	u32 val;

	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
		return false;

	val = mt7603_dma_debug(dev, 9);
	return (val & BIT(8)) && (val & 0xf) != 0xf;
}

static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
	struct mt76_queue *q;
	u32 dma_idx, prev_dma_idx;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mphy.q_tx[i];

		if (!q->queued)
			continue;

		prev_dma_idx = dev->tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->tx_dma_idx[i] = dma_idx;

		if (dma_idx == prev_dma_idx &&
		    dma_idx != readl(&q->regs->cpu_idx))
			break;
	}

	return i < 4;
}

static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	if (mt7603_rx_fifo_busy(dev))
		return false;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return true;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}

static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
		      enum mt7603_reset_cause cause,
		      bool (*check)(struct mt7603_dev *dev))
{
	if (dev->reset_test == cause + 1) {
		dev->reset_test = 0;
		goto trigger;
	}

	if (check) {
		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
			*counter = 0;
			return false;
		}

		(*counter)++;
	}

	if (*counter < MT7603_WATCHDOG_TIMEOUT)
		return false;
trigger:
	dev->cur_reset_cause = cause;
	dev->reset_cause[cause]++;
	return true;
}

void mt7603_update_channel(struct mt76_phy *mphy)
{
	struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev,
					      mt76);
	struct mt76_channel_state *state;

	state = mphy->chan_state;
	state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}

void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
	u32 rxtd_6 = 0xd7c80000;

	if (val == dev->ed_strict_mode)
		return;

	dev->ed_strict_mode = val;

	/* Ensure that ED/CCA does not trigger if disabled */
	if (!dev->ed_monitor)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

	if (dev->ed_monitor && !dev->ed_strict_mode)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

	mt76_wr(dev, MT_RXTD(6), rxtd_6);

	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
		       dev->ed_monitor && !dev->ed_strict_mode);
}

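/*
 * EDCCA monitoring: sample the per-chain RSSI and the energy-detect busy
 * time, and apply hysteresis through the ed_trigger/ed_strong_signal
 * counters, clamped to +/-MT7603_EDCCA_BLOCK_TH. Strict mode is entered
 * when the trigger counter overflows the threshold or too few strong
 * signal samples were seen, and left once the trigger swings fully
 * negative.
 */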
static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	if (dev->mphy.antenna_mask & BIT(1)) {
		rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
		if (rssi1 > 128)
			rssi1 -= 256;
	} else {
		rssi1 = rssi0;
	}

	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	if (!active)
		return;

	if (100 * ed_busy / active > 90) {
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}

void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}

static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;

		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x8181d5e3;
	} else {
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x818181e3;
	}

	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}

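/*
 * Dynamic sensitivity adjustment: false CCA events are estimated as the
 * difference between PD (preamble detect) and MDRDY (valid modulation)
 * counters. The sensitivity threshold is stepped up by 2 when false CCA
 * is high (> 600) and stepped down when it is low (< 100) or the last
 * adjustment is stale, and is capped at 15 dB below the weakest average
 * RSSI among the associated stations.
 */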
static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	if (!dev->dynamic_sensitivity)
		return;

	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	mt7603_cca_stats_reset(dev);

	min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
	if (!min_signal) {
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600 &&
	    dev->sensitivity < -100 + dev->sensitivity_limit) {
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}

void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mphy.mac_work.work);
	bool reset = false;
	int i, idx;

	mt76_tx_status_check(&dev->mt76, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mphy.mac_work_count++;
	mt76_update_survey(&dev->mphy);
	mt7603_edcca_check(dev);

	for (i = 0, idx = 0; i < 2; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mphy.aggr_stats[idx++] += val & 0xffff;
		dev->mphy.aggr_stats[idx++] += val >> 16;
	}

	if (dev->mphy.mac_work_count == 10)
		mt7603_false_cca_check(dev);

	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->mcu_hang = 0;
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mphy.mac_work_count = 0;
	}

	if (dev->mphy.mac_work_count >= 10)
		dev->mphy.mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}