// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
		[6] =  { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
		[9] =  { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
		[1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
		[2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
		[3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
		[4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
		[1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
		[2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
		[3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
		[4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
		[13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
		[14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
		[15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
	},
};

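/* Resolve the mt76_wcid for an RX frame. With MLO, the WTBL index found in
 * the RXD may belong to a link on a different band; in that case, walk the
 * station's links to find the one matching band_idx.
 */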
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, u8 band_idx)
{
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	struct mt7996_vif *mvif;
	struct mt76_wcid *wcid;
	int i;

	wcid = mt76_wcid_ptr(dev, idx);
	if (!wcid)
		return NULL;

	if (!mt7996_band_valid(dev, band_idx))
		return NULL;

	if (wcid->phy_idx == band_idx)
		return wcid;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	msta = msta_link->sta;
	if (!msta || !msta->vif)
		return NULL;

	mvif = msta->vif;
	for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
		struct mt76_vif_link *mlink;

		mlink = rcu_dereference(mvif->mt76.link[i]);
		if (!mlink)
			continue;

		if (mlink->band_idx != band_idx)
			continue;

		msta_link = rcu_dereference(msta->link[i]);
		break;
	}

	return &msta_link->wcid;
}

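/* Trigger a WTBL update for entry @idx and poll for completion, e.g.
 *
 *	mt7996_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
 *
 * clears the admission/airtime counters (see mt7996_mac_sta_poll()).
 */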
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

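/* Walk the pending sta_poll_list and, for each station link, read the per-AC
 * TX/RX airtime counters from WTBL DW20 onwards and report the deltas to
 * mac80211, then sample the RSSI of response frames (CTS/BA/ACK) from DW34.
 */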
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt7996_sta_link *msta_link;
	struct mt76_vif_link *mlink;
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct mt76_wcid *wcid;
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta_link = list_first_entry(&sta_poll_list,
					     struct mt7996_sta_link,
					     wcid.poll_list);
		msta = msta_link->sta;
		wcid = &msta_link->wcid;
		list_del_init(&wcid->poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = wcid->idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta_link->airtime_ac[i];
			u32 rx_last = msta_link->airtime_ac[i + 4];

			msta_link->airtime_ac[i] = mt76_rr(dev, addr);
			msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta_link->airtime_ac[i] - tx_last;
			rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta_link->airtime_ac, 0,
			       sizeof(msta_link->airtime_ac));
		}

		if (!wcid->sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
		if (mlink) {
			struct mt76_phy *mphy = mt76_vif_link_phy(mlink);

			if (mphy)
				msta_link->ack_signal =
					mt76_rx_signal(mphy->antenna_mask,
						       rssi);
		}

		ewma_avg_signal_add(&msta_link->avg_ack_signal,
				    -msta_link->ack_signal);
	}

	rcu_read_unlock();
}

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

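/* Decode the rate fields of the P-RXV (RXD group 3) into @status: PHY mode,
 * MCS/rate index, NSS, guard interval, STBC and bandwidth. Returns -EINVAL
 * for out-of-range rate indices or unknown PHY modes so the frame is dropped.
 */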
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode == MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

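/* For frames received on a WED RX queue with a valid PPE entry, hand the
 * skb back to the WED device so the hardware flow table can be checked.
 */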
static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

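/* Parse a normal RX descriptor: the fixed 8-DW RXD, then the optional groups
 * signalled in RXD1 (group 4: 802.11 header fields, group 1: security IV,
 * group 2: timestamp/A-MPDU, group 3: P-RXV, group 5: C-RXV), and fill the
 * mt76_rx_status embedded in skb->cb. Returns 0 if the frame should be
 * passed up the stack, -EINVAL if it must be dropped.
 */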
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);

	if (status->wcid) {
		struct mt7996_sta_link *msta_link;

		msta_link = container_of(status->wcid, struct mt7996_sta_link,
					 wcid);
		msta = msta_link->sta;
		mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
	 * Hence, drop fragments with multicast/broadcast RA.
	 * This check fixes vulnerabilities, like CVE-2020-26145.
	 */
	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length are stripped after hardware
			 * A-MSDU decapsulation, so clear the A-MSDU present
			 * bit here to mark the frame as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta = wcid_to_sta(wcid);

		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key,
			    struct mt76_wcid *wcid)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u16 seqno = le16_to_cpu(sc);
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		if (is_mt7990(&dev->mt76))
			txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
		tid = MT_TX_ADDBA;
	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
		tid = MT_TX_NORMAL;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	if (ieee80211_vif_is_mld(info->control.vif) &&
	    (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
	    ieee80211_vif_is_mld(info->control.vif)) {
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
	}

	if (!wcid->sta && ieee80211_is_mgmt(fc))
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}

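/* Fill the common TXWI doublewords (queue index, packet format, wcid, key
 * and fixed-rate selection) and delegate the header-format specific fields
 * to mt7996_mac_write_txwi_8023() or mt7996_mac_write_txwi_80211().
 */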
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mlink = NULL;
	struct mt7996_vif *mvif;
	unsigned int link_id;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	if (wcid != &dev->mt76.global_wcid)
		link_id = wcid->link_id;
	else
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);

	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	if (mvif)
		mlink = rcu_dereference(mvif->mt76.link[link_id]);

	if (mlink) {
		omac_idx = mlink->omac_idx;
		wmm_idx = mlink->wmm_idx;
		band_idx = mlink->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS;
	if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
		val |= MT_TXD6_DIS_MAT;

	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mlink) {
			if (mcast && mlink->mcast_rates_idx)
				idx = mlink->mcast_rates_idx;
			else if (beacon && mlink->beacon_rates_idx)
				idx = mlink->beacon_rates_idx;
			else
				idx = mlink->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		if (mcast)
			val |= MT_TXD6_DIS_MAT;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

static bool
mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_mgmt(hdr->frame_control))
		return true;

	/* let SDO bypass specific data frames */
	if (!mt7996_has_wa(dev)) {
		if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
			return true;

		if (ieee80211_has_a4(hdr->frame_control) &&
		    !ieee80211_is_data_present(hdr->frame_control))
			return true;
	}

	return false;
}

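/* DMA TX hook: claim a token for the txwi, build the TXD when needed, and
 * set up the cut-through (CT) TXP that points the firmware at the frame
 * buffers.
 */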
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* The host must fill the TXD for 802.11 frames and for any frame
	 * that needs TX status reporting.
	 */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
				      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		u16 len;

		len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
				  tx_info->buf[i + 1].addr >> 32);
#endif

		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
		struct mt76_vif_link *mlink = NULL;

		if (wcid->offchannel)
			mlink = rcu_dereference(mvif->mt76.offchannel_link);
		if (!mlink)
			mlink = &mvif->deflink.mt76;

		txp->fw.bss_idx = mlink->idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	u16 fc, tid;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (sta->wme ? IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	msta_link = &msta->deflink;

	if (!test_and_set_bit(tid, &msta_link->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, t->skb);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

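/* Process a TXRX_NOTIFY (tx-free) event: each DW either starts a new wcid
 * pair, carries per-MPDU retry/ack statistics, or releases up to two MSDU
 * tokens whose txwi entries can then be completed and freed.
 */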
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;
	u8 ver;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
	if (WARN_ON_ONCE(ver < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta_link *msta_link;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta)
				goto next;

			msta_link = container_of(wcid, struct mt7996_sta_link,
						 wcid);
			mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
next:
			/* ver 7 has a new DW with pair = 1, skip it */
			if (ver == 7 && ((void *)(cur_info + 1) < end) &&
			    (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
				cur_info++;
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

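/* Consume one TXS record for @wcid: complete the matching status skb (if the
 * PID has one pending) and refresh the wcid rate info from the reported rate.
 */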
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = wcid_to_sta(wcid);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta_link *msta_link;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	rcu_read_lock();

	wcid = mt76_wcid_ptr(dev, wcidx);
	if (!wcid)
		goto out;

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

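/* Average the per-antenna IRPI histograms into one noise-floor estimate:
 * bucket i counts samples at roughly -nf_power[i] dBm, so the result is the
 * count-weighted mean sum(cnt[i] * nf_power[i]) / sum(cnt[i]), returned as a
 * positive magnitude and negated by the caller.
 */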
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

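/* Full chip restart: mask interrupts, quiesce tx/rx and NAPI, reset DMA and
 * the token table, reload firmware and EEPROM, then bring the running phys
 * back up. Called under dev->mt76.mutex (see mt7996_mac_full_reset()).
 */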
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2)
		set_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		set_bit(MT76_RESET, &phy3->mt76->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			local_bh_disable();
			napi_schedule(&dev->mt76.napi[i]);
			local_bh_enable();
		}
	}
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	ret = mt7996_txbf_init(dev);
	if (ret)
		goto out;

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(&dev->phy);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

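/*
 * Chip-level recovery: stop all mac80211 queues, retry the firmware
 * restart a bounded number of times and hand the (possibly failed)
 * result back to mac80211 via ieee80211_restart_hw().
 */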
static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

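/*
 * SER (system error recovery) worker. Two paths: a full chip restart
 * when the WM/WA firmware watchdog fired (dev->recovery.restart),
 * otherwise the lighter L1 recovery which resets DMA in lockstep with
 * the MCU through the MT_MCU_INT_EVENT handshake.
 */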
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "%s L1 SER recovery start\n",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA tx/rx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "%s L1 SER recovery completed\n",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
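/*
 * Snapshot the firmware memory regions into the preallocated dump
 * buffer and hand the result to the coredump facility. Each region is
 * prefixed with a small header recording its device address and length;
 * the reset work is queued unconditionally once the dump (if any) has
 * been submitted.
 */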
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

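/*
 * Entry point for all recovery requests. A WM/WA watchdog exception
 * triggers a coredump followed by a full chip restart; anything else
 * simply kicks the reset worker for L1 SER.
 */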
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

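/*
 * Accumulate the per-band hardware MIB counters into the software mib
 * state. The MIB registers on this chip are presumably clear-on-read,
 * which is why every read is added to (rather than stored into) the
 * corresponding counter.
 */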
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}

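/*
 * Deferred rate-control updates. Station links queued on sta_rc_list
 * are drained here and their accumulated IEEE80211_RC_* change flags
 * are translated into MCU rate-control commands outside of atomic
 * context; the poll lock is dropped around each MCU call.
 */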
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct mt7996_sta_link *msta_link;
	struct ieee80211_vif *vif;
	struct mt7996_vif *mvif;
	LIST_HEAD(list);
	u32 changed;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta_link = list_first_entry(&list, struct mt7996_sta_link,
					     rc_list);
		list_del_init(&msta_link->rc_list);

		changed = msta_link->changed;
		msta_link->changed = 0;
		mvif = msta_link->sta->vif;
		vif = container_of((void *)mvif, struct ieee80211_vif,
				   drv_priv);

		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
						 msta_link->wcid.link_id,
						 true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL,
						   msta_link->wcid.link_id,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}

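/*
 * Per-phy watchdog, rescheduled every MT7996_WATCHDOG_TIME. Survey data
 * is refreshed on every run; MIB statistics and the per-station
 * rate/admission queries only happen on every fifth pass to keep MCU
 * traffic down.
 */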
void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

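/*
 * DFS handling: radar detection itself runs in the MCU and is driven
 * through RDD (radar detector) commands; the helpers below only select
 * the detector instance and the regulatory pattern set.
 */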
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int rdd_idx = mt7996_get_rdd_idx(phy, false);

	if (rdd_idx < 0)
		return;

	mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}

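/*
 * The numeric region handed to RDD_START appears to be the MCU's own
 * DFS-domain encoding (0 = ETSI, 1 = FCC, 2 = JP) rather than the
 * NL80211 value, hence the explicit mapping below; unknown domains fall
 * back to FCC.
 */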
static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}

static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int err, rdd_idx;

	rdd_idx = mt7996_get_rdd_idx(phy, false);
	if (rdd_idx < 0)
		return -EINVAL;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
	if (err < 0)
		return err;

	return mt7996_dfs_start_rdd(dev, rdd_idx);
}

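/*
 * Program the per-domain radar pulse threshold and pattern table into
 * the MCU. The FCC domain additionally programs an FCC type-5
 * long-pulse parameter (mt7996_mcu_set_fcc5_lpn()) before the patterns
 * are loaded.
 */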
static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

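/*
 * Drive the software DFS state machine (UNKNOWN -> DISABLED -> CAC ->
 * ACTIVE) whenever the channel or regulatory state changes: (re)load
 * the radar specs and start CAC when detection becomes required, signal
 * CAC completion to the MCU, and drop back to normal operation when
 * detection is no longer needed.
 */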
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err, rdd_idx = mt7996_get_rdd_idx(phy, false);

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state || rdd_idx < 0)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

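/*
 * The TWT bookkeeping below works in TSF microseconds. Wake durations
 * are negotiated in units of 256 usec (the only unit this driver
 * accepts, see mt7996_mac_check_twt_req()), so converting a duration to
 * microseconds is a shift by 8.
 */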
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

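/*
 * Insert a flow into the TSF-ordered TWT schedule list, picking the
 * first gap between already scheduled flows that is large enough to
 * hold the new service period. Returns the start TSF chosen for the
 * flow (0 when it becomes the list head).
 */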
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

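/*
 * Reject TWT requests the hardware cannot serve: only individual,
 * implicit agreements with 256 usec duration units are supported, and
 * the wake interval (mantissa * 2^exp usec) must cover at least the
 * requested wake duration.
 */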
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

static bool
mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta_link->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta_link->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

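/*
 * mac80211 TWT setup handler. Validate the request, allocate a flow id
 * and agreement table slot, schedule the service period (answering
 * REQUEST/SUGGEST with a concrete target wake time derived from the
 * current TSF), and push the agreement to the MCU. On failure the setup
 * command is answered with REJECT, or with DICTATE when only the
 * requested duration was too small.
 */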
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	struct mt7996_sta_link *msta_link = &msta->deflink;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta_link->twt.flowid_mask) ==
	    ARRAY_SIZE(msta_link->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
		goto unlock;

	flowid = ffs(~msta_link->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta_link->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta_link->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
				       MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta_link->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control &= IEEE80211_TWT_CONTROL_RX_DISABLED;
}

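/*
 * Tear down a single TWT flow: remove the agreement from the MCU first
 * and only then release the flow id, table slot and agreement count, so
 * the bookkeeping stays consistent if the MCU call fails.
 */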
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_vif_link *link,
				  struct mt7996_sta_link *msta_link,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
		return;

	if (!(msta_link->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta_link->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta_link->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}
