// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

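/* RCPI is encoded in 0.5 dB steps with a +220 offset; convert it to dBm */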
#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

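/* Look up the mt76_wcid for a received frame. Unicast frames resolve to the
 * per-station entry directly; group-addressed frames are mapped to the wcid
 * of the owning vif's default link.
 */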
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7996_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7996_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->deflink.sta.wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

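/* Select the WTBL group of this wcid via WDUCR and return the on-chip
 * address of the requested dword within its LMAC WTBL entry.
 */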
u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

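/* Refresh per-AC airtime counters and ACK RSSI from the WTBL for every
 * station queued on the poll list. Once an airtime counter reaches BIT(30),
 * the WTBL admission counters are cleared and accounting restarts from zero.
 */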
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7996_sta, wcid.poll_list);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = msta->wcid.idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		msta->ack_signal =
			mt76_rx_signal(msta->vif->deflink.phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
	}

	rcu_read_unlock();
}

void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->deflink.sta.wcid.idx, 5);
	if (enable)
		mt76_set(dev, addr, BIT(5));
	else
		mt76_clear(dev, addr, BIT(5));
}

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

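/* Parse the P-RXV (group 3) rate fields into the mac80211 rx status:
 * encoding, MCS/NSS, guard interval and bandwidth.
 */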
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode == MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

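/* For frames received on a WED rx queue with a valid PPE hint, hand them
 * back to the WED/PPE block so the hardware flow table can be updated.
 */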
static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

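/* Parse the RXD and its optional groups for a received frame and fill in
 * mt76_rx_status; returns 0 on success or -EINVAL for malformed frames.
 */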
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7996_sta, wcid);
		mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so here needs to clear amsdu present bit
			 * to mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

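/* Fill the txwi fields that depend on the 802.3 (hw-encap) frame format */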
static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(sc);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}

void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mvif;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	mvif = vif ? (struct mt76_vif_link *)vif->drv_priv : NULL;
	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		u16 len;

		len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
				  tx_info->buf[i + 1].addr >> 32);
#endif

		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags =
		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->fw.bss_idx = mvif->deflink.mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

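/* Pre-build a minimal TXD + TXP template in a WED tx buffer and return the
 * offset of the packet data behind the descriptor and txp.
 */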
u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
#if defined(__linux__)
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
#elif defined(__FreeBSD__)
	struct mt76_connac_fw_txp *txp = (void *)((u8 *)ptr + MT_TXD_SIZE);
#endif
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

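/* Kick off a BA session the first time a QoS data frame is seen on a TID
 * of an HT/HE capable peer; VO TIDs are skipped.
 */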
static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct mt7996_sta *msta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (sta->wme ? IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, t->skb);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

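/* Process a TXRX_NOTIFY (tx free) event: clean up the tx queues, then walk
 * the wcid/msdu entries to release tokens, update per-station tx stats and
 * complete the freed skbs.
 */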
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
#if defined(__linux__)
	void *end = data + len;
#elif defined(__FreeBSD__)
	void *end = (u8 *)data + len;
#endif
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

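/* Parse a TXS record for a wcid: report MPDU tx status to mac80211 and
 * update the station's last tx rate and counters.
 */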
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	mt76_wcid_add_poll(&dev->mt76, &msta->wcid);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
#if defined(CONFIG_MT7996_DEBUGFS)
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
#endif
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
#if defined(CONFIG_MT7996_DEBUGFS)
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
#endif
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

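/* Average the per-antenna IRPI histograms into an estimated noise-floor
 * magnitude; returns 0 when no samples were collected.
 */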
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2)
		set_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		set_bit(MT76_RESET, &phy3->mt76->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			local_bh_disable();
			napi_schedule(&dev->mt76.napi[i]);
			local_bh_enable();
		}
	}
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(&dev->phy);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "%s L1 SER recovery start\n",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "%s L1 SER recovery completed\n",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
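/* The memdump buffer built below is a flat sequence of (header, payload)
 * records, one per firmware memory region:
 *
 *   [mt7996_mem_hdr][region data][mt7996_mem_hdr][region data]...
 *
 * A zero-length region terminates the walk but keeps its header, so a
 * parser can rely on hdr->len == 0 as an end marker.
 */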
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

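/* Entry point for recovery requests. A WM/WA watchdog state routes
 * through dump_work first (coredump, which then queues reset_work for
 * the full restart); every other recoverable event goes straight to
 * reset_work for L1 SER.
 */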
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}
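
/* The MIB reads above are accumulated into mib->* because the hardware
 * counters are cleared by the read, so each poll only sees the delta
 * since the previous one. Composite metrics can be derived from the
 * accumulated values; a minimal sketch (not used by the driver itself)
 * for tx MPDU packet error rate:
 *
 *	u64 att = mib->tx_mpdu_attempts_cnt;
 *	u64 ok = mib->tx_mpdu_success_cnt;
 *	u32 per = att ? div64_u64((att - ok) * 100, att) : 0;
 */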

void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
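
/* A minimal producer-side sketch (the real caller is the driver's
 * mac80211 rate-control hook): queue a station onto sta_rc_list with
 * the changed bits set, then kick rc_work. Only fields already used
 * above are assumed:
 *
 *	spin_lock_bh(&dev->mt76.sta_poll_lock);
 *	msta->changed |= IEEE80211_RC_BW_CHANGED;
 *	if (list_empty(&msta->rc_list))
 *		list_add_tail(&msta->rc_list, &dev->sta_rc_list);
 *	spin_unlock_bh(&dev->mt76.sta_poll_lock);
 *	ieee80211_queue_work(hw, &dev->rc_work);
 */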

void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}
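
/* The watchdog re-arms itself every MT7996_WATCHDOG_TIME; the channel
 * survey is refreshed on every run, while the heavier MIB and
 * per-station polls above only run on every fifth tick. Assuming the
 * usual HZ / 10 definition of MT7996_WATCHDOG_TIME, that works out to
 * stats roughly twice per second, but the exact period is whatever
 * mt7996.h defines.
 */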

static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
				   MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
				   MT_RX_SEL0, 0);
}

static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
				 MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int err;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(band_idx);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7996_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}
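
/* phy->rdd_state tracks the live RDD chains as a bitmask: BIT(band_idx)
 * for the band's own detector, plus BIT(1) when a second chain is
 * started to cover 160 MHz or 80+80 channels. The stop path above only
 * needs to test BIT(0) and BIT(1) for the same reason.
 */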

static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}
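
/* DFS state transitions driven above:
 *
 *   UNKNOWN/DISABLED -> CAC       init specs, start detector and CAC
 *   CAC              -> ACTIVE    RDD_CAC_END once the CAC period ends
 *   any              -> DISABLED  RDD_NORMAL_START, detector stopped
 *
 * A failed RDD_CAC_END drops back to UNKNOWN so the next call retries
 * the whole bring-up.
 */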

static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}
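
/* TWT wake durations are carried in units of 256 us (the only unit
 * accepted by mt7996_mac_check_twt_req() below), so the shift by 8
 * converts to microseconds: e.g. duration = 10 -> 10 << 8 = 2560 us.
 */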

static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
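
/* The scheduler keeps dev->twt_list ordered by start_tsf and does a
 * first-fit search: the new flow lands in the first gap between two
 * scheduled neighbours that is at least "duration" microseconds wide,
 * or at the tail if no such gap exists. The returned start_tsf is the
 * end of the predecessor's service period; 0 means the flow became the
 * head of the list.
 */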

static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}
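
/* Worked example of the acceptance check: with mantissa = 512 and
 * exp = 10, interval = 512 << 10 = 524288 us (~0.52 s); a request with
 * min_twt_dur = 255 gives duration = 255 << 8 = 65280 us, which fits
 * within the interval, so the agreement is not rejected here.
 */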

static bool
mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}
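
/* For REQUEST/SUGGEST the first service period is aligned so that the
 * announced TWT lands on the flow's schedule grid:
 *
 *   flow_tsf = curr_tsf + interval - ((curr_tsf - start_tsf) % interval)
 *
 * e.g. start_tsf = 0, interval = 524288 us, curr_tsf = 1000000 us:
 * rem = 1000000 % 524288 = 475712, so flow_tsf = 1048576 us, exactly
 * two intervals after start_tsf.
 */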

void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_sta *msta,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}
