// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

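/* RCPI values reported by the MAC are in 0.5 dB units with a +220 offset,
 * so dBm = rcpi / 2 - 110; e.g. an RCPI field value of 100 decodes to
 * (100 - 220) / 2 = -60 dBm.
 */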
#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8, 32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12, 32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8, 32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6, 32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

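/* Resolve the mt76_wcid for a received frame. For an MLO station the WLAN
 * index from the RXD may belong to a different link than the receiving
 * band, so walk the vif links to find the one matching band_idx.
 */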
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, u8 band_idx)
{
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	struct mt7996_vif *mvif;
	struct mt76_wcid *wcid;
	int i;

	wcid = mt76_wcid_ptr(dev, idx);
	if (!wcid || !wcid->sta)
		return NULL;

	if (!mt7996_band_valid(dev, band_idx))
		return NULL;

	if (wcid->phy_idx == band_idx)
		return wcid;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	msta = msta_link->sta;
	if (!msta || !msta->vif)
		return NULL;

	mvif = msta->vif;
	for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
		struct mt76_vif_link *mlink;

		mlink = rcu_dereference(mvif->mt76.link[i]);
		if (!mlink)
			continue;

		if (mlink->band_idx != band_idx)
			continue;

		msta_link = rcu_dereference(msta->link[i]);
		break;
	}

	return &msta_link->wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

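/* Drain the station poll list: for each queued station link, read the
 * per-AC TX/RX airtime counters and the RSSI of response frames
 * (CTS/BA/ACK) from the WTBL and report them to mac80211.
 */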
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt7996_sta_link *msta_link;
	struct mt76_vif_link *mlink;
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct mt76_wcid *wcid;
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta_link = list_first_entry(&sta_poll_list,
					     struct mt7996_sta_link,
					     wcid.poll_list);
		msta = msta_link->sta;
		wcid = &msta_link->wcid;
		list_del_init(&wcid->poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = wcid->idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta_link->airtime_ac[i];
			u32 rx_last = msta_link->airtime_ac[i + 4];

			msta_link->airtime_ac[i] = mt76_rr(dev, addr);
			msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta_link->airtime_ac[i] - tx_last;
			rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

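		/* Ask the HW to reset the WTBL admission counters once bit 30
		 * is seen set, i.e. before the 32-bit values wrap, and
		 * restart the software mirror from zero.
		 */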
		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta_link->airtime_ac, 0,
			       sizeof(msta_link->airtime_ac));
		}

		if (!wcid->sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
		if (mlink) {
			struct mt76_phy *mphy = mt76_vif_link_phy(mlink);

			if (mphy)
				msta_link->ack_signal =
					mt76_rx_signal(mphy->antenna_mask,
						       rssi);
		}

		ewma_avg_signal_add(&msta_link->avg_ack_signal,
				    -msta_link->ack_signal);
	}

	rcu_read_unlock();
}

/* The HW does not translate the MAC header to 802.3 for mesh points */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta_link *msta_link = (void *)status->wcid;
	struct mt7996_sta *msta = msta_link->sta;
	struct ieee80211_bss_conf *link_conf;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = wcid_to_sta(status->wcid);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
	link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]);
	if (!link_conf)
		return -EINVAL;

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, link_conf->bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

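/* Decode the per-packet RX vector (P-RXV) into mt76 rx status fields:
 * rate index, NSS, STBC, guard interval, DCM and bandwidth.
 */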
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

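/* Parse a received frame. The fixed RXD words come first; the optional RXD
 * groups that follow are consumed in the order the hardware appends them
 * (group 4, then 1, 2, 3 and 5), before the 802.11/802.3 header handling.
 */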
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);

	if (status->wcid) {
		struct mt7996_sta_link *msta_link;

		msta_link = container_of(status->wcid, struct mt7996_sta_link,
					 wcid);
		msta = msta_link->sta;
		mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
	 * Hence, drop fragments with multicast/broadcast RA.
	 * This check fixes vulnerabilities, like CVE-2020-26145.
	 */
	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped by hardware
			 * A-MSDU deaggregation, so clear the A-MSDU present
			 * bit here to mark this as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
		mt76_npu_check_ppe(&dev->mt76, skb, *info);
	}

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta = wcid_to_sta(wcid);

		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key,
			    struct mt76_wcid *wcid)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u16 seqno = le16_to_cpu(sc);
	bool hw_bigtk = false;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		if (is_mt7990(&dev->mt76))
			txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
		else
			txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD);

		tid = MT_TX_ADDBA;
	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
		tid = MT_TX_NORMAL;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (is_mt7990(&dev->mt76) && ieee80211_is_beacon(fc) &&
	    (wcid->hw_key_idx2 == 6 || wcid->hw_key_idx2 == 7))
		hw_bigtk = true;

	if ((key && multicast && ieee80211_is_robust_mgmt_frame(skb)) || hw_bigtk) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	if (ieee80211_vif_is_mld(info->control.vif) &&
	    (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
	    ieee80211_vif_is_mld(info->control.vif)) {
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
	}

	if (!wcid->sta && ieee80211_is_mgmt(fc))
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}

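/* Fill the common TXD words shared by both frame formats: queue and packet
 * format in DW0, WLAN/OMAC index and band in DW1, protection and retry
 * count in DW3, packet id in DW5 and MSDU count in DW6, then hand off to
 * the 802.3 or 802.11 specific helper above.
 */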
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mlink = NULL;
	struct mt7996_vif *mvif;
	unsigned int link_id;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	if (wcid != &dev->mt76.global_wcid)
		link_id = wcid->link_id;
	else
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);

	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	if (mvif) {
		if (wcid->offchannel)
			mlink = rcu_dereference(mvif->mt76.offchannel_link);
		if (!mlink)
			mlink = rcu_dereference(mvif->mt76.link[link_id]);
	}

	if (mlink) {
		omac_idx = mlink->omac_idx;
		wmm_idx = mlink->wmm_idx;
		band_idx = mlink->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS | MT_TXD6_VTA;
	if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ||
	    skb->protocol == cpu_to_be16(ETH_P_PAE))
		val |= MT_TXD6_DIS_MAT;

	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mlink) {
			if (mcast && mlink->mcast_rates_idx)
				idx = mlink->mcast_rates_idx;
			else if (beacon && mlink->beacon_rates_idx)
				idx = mlink->beacon_rates_idx;
			else
				idx = mlink->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		if (mcast)
			val |= MT_TXD6_DIS_MAT;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

static bool
mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_mgmt(hdr->frame_control))
		return true;

	/* for SDO to bypass specific data frames */
	if (!mt7996_has_wa(dev)) {
		if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
			return true;

		if (ieee80211_has_a4(hdr->frame_control) &&
		    !ieee80211_is_data_present(hdr->frame_control))
			return true;
	}

	return false;
}

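/* Prepare an skb for DMA transmission: pick the per-link wcid for MLO,
 * consume a TX token, optionally fix up EAPOL addresses in software, write
 * the TXD and build the TXP fragment descriptors.
 */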
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt7996_vif *mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
	struct mt76_vif_link *mlink = NULL;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	__le32 *ptr = (__le32 *)txwi_ptr;
	u8 *txwi = (u8 *)txwi_ptr;
	u8 link_id;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if ((is_8023 || ieee80211_is_data_qos(hdr->frame_control)) && sta->mlo &&
	    likely(tx_info->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
		u8 tid = tx_info->skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		link_id = (tid % 2) ? msta->seclink_id : msta->deflink_id;
	} else {
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);
	}

	if (link_id != wcid->link_id && link_id != IEEE80211_LINK_UNSPECIFIED) {
		if (msta) {
			struct mt7996_sta_link *msta_link =
				rcu_dereference(msta->link[link_id]);

			if (msta_link)
				wcid = &msta_link->wcid;
		} else if (mvif) {
			mlink = rcu_dereference(mvif->mt76.link[link_id]);
			if (mlink && mlink->wcid)
				wcid = mlink->wcid;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	/* Since the rules for HW MLD address translation are not fully
	 * compatible with 802.11 EAPOL frames, do the translation in
	 * software.
	 */
	if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) {
		struct ieee80211_hdr *hdr = (void *)tx_info->skb->data;
		struct ieee80211_bss_conf *link_conf;
		struct ieee80211_link_sta *link_sta;

		link_conf = rcu_dereference(vif->link_conf[wcid->link_id]);
		if (!link_conf)
			return -EINVAL;

		link_sta = rcu_dereference(sta->link[wcid->link_id]);
		if (!link_sta)
			return -EINVAL;

		dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr,
					tx_info->buf[1].len, DMA_TO_DEVICE);

		memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
		memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
		if (ieee80211_has_a4(hdr->frame_control)) {
			memcpy(hdr->addr3, sta->addr, ETH_ALEN);
			memcpy(hdr->addr4, vif->addr, ETH_ALEN);
		} else if (ieee80211_has_tods(hdr->frame_control)) {
			memcpy(hdr->addr3, sta->addr, ETH_ALEN);
		} else if (ieee80211_has_fromds(hdr->frame_control)) {
			memcpy(hdr->addr3, vif->addr, ETH_ALEN);
		}

		dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr,
					   tx_info->buf[1].len, DMA_TO_DEVICE);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* Non-802.3 frames carry an 802.11 header and need the txd to be
	 * filled by the host, as do frames that require TX status reporting.
	 */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
				      pid, qid, 0);

	/* MT7996 and MT7992 require the driver to provide the MAC TXP for an
	 * ADDBA req.
	 */
	if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) {
		u32 val;

		ptr = (__le32 *)(txwi + MT_TXD_SIZE);
		memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp));

		val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) |
		      MT_TXP0_TOKEN_ID0_VALID_MASK;
		ptr[0] = cpu_to_le32(val);

		val = FIELD_PREP(MT_TXP1_TID_ADDBA,
				 tx_info->skb->priority &
				 IEEE80211_QOS_CTL_TID_MASK);
		ptr[1] = cpu_to_le32(val);
		ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF);

		val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) |
		      MT_TXP3_ML0_MASK;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H,
				  tx_info->buf[1].addr >> 32);
#endif
		ptr[3] = cpu_to_le32(val);
	} else {
		struct mt76_connac_txp_common *txp;

		txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
		for (i = 0; i < nbuf; i++) {
			u16 len;

			len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
					  tx_info->buf[i + 1].addr >> 32);
#endif

			txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
			txp->fw.len[i] = cpu_to_le16(len);
		}
		txp->fw.nbuf = nbuf;

		txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

		if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

		if (!key)
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

		if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

		if (mvif) {
			if (wcid->offchannel)
				mlink = rcu_dereference(mvif->mt76.offchannel_link);
			if (!mlink)
				mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);

			txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
		}

		txp->fw.token = cpu_to_le16(id);
		txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
	}

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

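/* Build the template TXD + TXP used for WED (Wireless Ethernet Dispatch)
 * TX buffers; only the token id and a single buffer pointer are filled in.
 */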
u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

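/* Kick off a BlockAck session the first time a QoS data frame on a given
 * TID completes; the per-TID flag in wcid->ampdu_state makes sure the
 * session is only requested once.
 */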
static void
mt7996_tx_check_aggr(struct ieee80211_link_sta *link_sta,
		     struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (link_sta->sta->wme ? IEEE80211_STYPE_QOS_DATA
					 : IEEE80211_STYPE_DATA);
	} else {
		/* There is no need to derive a precise TID for Action or
		 * Management frames, since they will not match the frame
		 * control check below anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	if (!test_and_set_bit(tid, &wcid->ampdu_state))
		ieee80211_start_tx_ba_session(link_sta->sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_link_sta *link_sta,
		 struct mt76_wcid *wcid, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (link_sta) {
		wcid_idx = wcid->idx;
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
			struct mt7996_sta *msta;

			/* AMPDU state is stored in the primary link */
			msta = (void *)link_sta->sta->drv_priv;
			mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid,
					     t->skb);
		}
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

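/* Handle a TXFREE event: every DW either starts a new wcid pair, carries
 * per-MPDU retry/failure counts, or releases up to two MSDU tokens back to
 * the token ring.
 */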
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct ieee80211_link_sta *link_sta = NULL;
	struct mt76_txwi_cache *txwi;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;
	u8 ver;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
	if (WARN_ON_ONCE(ver < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct ieee80211_sta *sta;
			unsigned long valid_links;
			struct mt7996_sta *msta;
			unsigned int id;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta) {
				link_sta = NULL;
				goto next;
			}

			link_sta = rcu_dereference(sta->link[wcid->link_id]);
			if (!link_sta)
				goto next;

			msta = (struct mt7996_sta *)sta->drv_priv;
			valid_links = sta->valid_links ?: BIT(0);

			/* For an MLD STA, add each link's wcid to the sta_poll_list */
			for_each_set_bit(id, &valid_links,
					 IEEE80211_MLD_MAX_NUM_LINKS) {
				struct mt7996_sta_link *msta_link;

				msta_link = rcu_dereference(msta->link[id]);
				if (!msta_link)
					continue;

				mt76_wcid_add_poll(&dev->mt76,
						   &msta_link->wcid);
			}
next:
			/* ver 7 has a new DW with pair = 1, skip it */
			if (ver == 7 && ((void *)(cur_info + 1) < end) &&
			    (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
				cur_info++;
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, link_sta, wcid,
					 &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

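/* Consume a single TX status (TXS) record: complete the matching
 * status-tracked skb, if any, and refresh the per-wcid rate info and
 * counters from the reported TX rate.
 */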
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = wcid_to_sta(wcid);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

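/* TXS records whose packet id is below MT_PACKET_ID_NO_SKB carry no status
 * of interest and are ignored before the wcid lookup.
 */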
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta_link *msta_link;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	rcu_read_lock();

	wcid = mt76_wcid_ptr(dev, wcidx);
	if (!wcid)
		goto out;

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

static struct mt7996_msdu_page *
mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev)
{
	struct mt7996_msdu_page *p = NULL;

	spin_lock(&dev->wed_rro.lock);

	if (!list_empty(&dev->wed_rro.page_cache)) {
		p = list_first_entry(&dev->wed_rro.page_cache,
				     struct mt7996_msdu_page, list);
		list_del(&p->list);
	}

	spin_unlock(&dev->wed_rro.lock);

	return p;
}

static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev)
{
	struct mt7996_msdu_page *p;

	p = mt7996_msdu_page_get_from_cache(dev);
	if (!p) {
		p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
		if (p)
			INIT_LIST_HEAD(&p->list);
	}

	return p;
}

static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev,
					  struct mt7996_msdu_page *p)
{
	if (p->buf) {
		mt76_put_page_pool_buf(p->buf, false);
		p->buf = NULL;
	}

	spin_lock(&dev->wed_rro.lock);
	list_add(&p->list, &dev->wed_rro.page_cache);
	spin_unlock(&dev->wed_rro.lock);
}

static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev)
{
	while (true) {
		struct mt7996_msdu_page *p;

		p = mt7996_msdu_page_get_from_cache(dev);
		if (!p)
			break;

		if (p->buf)
			mt76_put_page_pool_buf(p->buf, false);

		kfree(p);
	}
}

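/* Hash a DMA address into the page_map table: fold the address one byte at
 * a time with a growing per-byte offset (i += 13), so addresses that differ
 * in any single byte are likely to land in different buckets.
 */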
static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr)
{
	u32 val = 0;
	int i = 0;

	while (dma_addr) {
		val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
		dma_addr >>= 8;
		i += 13;
	}

	return val % MT7996_RRO_MSDU_PG_HASH_SIZE;
}

static struct mt7996_msdu_page *
mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr)
{
	u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
	struct mt7996_msdu_page *p, *tmp, *addr = NULL;

	spin_lock(&dev->wed_rro.lock);

	list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash],
				 list) {
		if (p->dma_addr == dma_addr) {
			list_del(&p->list);
			addr = p;
			break;
		}
	}

	spin_unlock(&dev->wed_rro.lock);

	return addr;
}

mt7996_rx_token_put(struct mt7996_dev * dev)1804 static void mt7996_rx_token_put(struct mt7996_dev *dev)
1805 {
1806 int i;
1807
1808 for (i = 0; i < dev->mt76.rx_token_size; i++) {
1809 struct mt76_txwi_cache *t;
1810
1811 t = mt76_rx_token_release(&dev->mt76, i);
1812 if (!t || !t->ptr)
1813 continue;
1814
1815 mt76_put_page_pool_buf(t->ptr, false);
1816 t->dma_addr = 0;
1817 t->ptr = NULL;
1818
1819 mt76_put_rxwi(&dev->mt76, t);
1820 }
1821 }
1822
mt7996_rro_msdu_page_map_free(struct mt7996_dev * dev)1823 void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev)
1824 {
1825 struct mt7996_msdu_page *p, *tmp;
1826 int i;
1827
1828 local_bh_disable();
1829
1830 for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) {
1831 list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i],
1832 list) {
1833 list_del_init(&p->list);
1834 if (p->buf)
1835 mt76_put_page_pool_buf(p->buf, false);
1836 kfree(p);
1837 }
1838 }
1839 mt7996_msdu_page_free_cache(dev);
1840
1841 local_bh_enable();
1842
1843 mt7996_rx_token_put(dev);
1844 }
1845
int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
			     dma_addr_t dma_addr, void *data)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct mt7996_msdu_page_info *pinfo = data;
	struct mt7996_msdu_page *p;
	u32 hash;

	pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1));
	p = mt7996_msdu_page_get(dev);
	if (!p)
		return -ENOMEM;

	p->buf = data;
	p->dma_addr = dma_addr;
	p->q = q;

	hash = mt7996_msdu_page_hash_from_addr(dma_addr);

	spin_lock(&dev->wed_rro.lock);
	list_add_tail(&p->list, &dev->wed_rro.page_map[hash]);
	spin_unlock(&dev->wed_rro.lock);

	return 0;
}

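/* Map (session_id, seq_num) to its address element. The special
 * MT7996_RRO_MAX_SESSION id uses a dedicated table, while regular sessions
 * are grouped into addr_elem[] blocks of MT7996_RRO_BA_BITMAP_SESSION_SIZE
 * sessions, MT7996_RRO_WINDOW_MAX_LEN entries each.
 */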
static struct mt7996_wed_rro_addr *
mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num)
{
	u32 idx = 0;
	void *addr;

	if (session_id == MT7996_RRO_MAX_SESSION) {
		addr = dev->wed_rro.session.ptr;
	} else {
		idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
		addr = dev->wed_rro.addr_elem[idx].ptr;

		idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE;
		idx = idx * MT7996_RRO_WINDOW_MAX_LEN;
	}
	idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN;

	return addr + idx * sizeof(struct mt7996_wed_rro_addr);
}

#define MT7996_RRO_SN_MASK	GENMASK(11, 0)

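/* Process one RRO indication from the WED block: walk the in-order address
 * elements for the session, pull the MSDU descriptors out of the linked
 * pages, rebuild skbs and feed them to mac80211. The hardware ACK sequence
 * number register is refreshed every four elements, with a final update for
 * any remainder.
 */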
void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
	u32 cmd_data0 = le32_to_cpu(cmd->data0);
	u32 cmd_data1 = le32_to_cpu(cmd->data1);
	u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
	u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
	u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
	u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
	struct mt7996_msdu_page_info *pinfo = NULL;
	struct mt7996_msdu_page *p = NULL;
	int i, seq_num = 0;

	for (i = 0; i < ind_count; i++) {
		struct mt7996_wed_rro_addr *e;
		struct mt76_rx_status *status;
		struct mt7996_rro_hif *rxd;
		int j, len, qid, data_len;
		struct mt76_txwi_cache *t;
		dma_addr_t dma_addr = 0;
		u16 rx_token_id, count;
		struct mt76_queue *q;
		struct sk_buff *skb;
		u32 info = 0, data;
		u8 signature;
		void *buf;
		bool ls;

		seq_num = FIELD_GET(MT7996_RRO_SN_MASK, start_seq + i);
		e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
		data = le32_to_cpu(e->data);
		signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
		if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
			u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
					     0xff);

			e->data |= cpu_to_le32(val);
			goto update_ack_seq_num;
		}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
		dma_addr <<= 32;
#endif
		dma_addr |= le32_to_cpu(e->head_low);

		count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
		for (j = 0; j < count; j++) {
			if (!p) {
				p = mt7996_rro_msdu_page_get(dev, dma_addr);
				if (!p)
					continue;

				dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
							SKB_WITH_OVERHEAD(p->q->buf_size),
							page_pool_get_dma_dir(p->q->page_pool));
				pinfo = (struct mt7996_msdu_page_info *)p->buf;
			}

			rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
			len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
					le32_to_cpu(rxd->data1));

			rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
						le32_to_cpu(rxd->data4));
			t = mt76_rx_token_release(mdev, rx_token_id);
			if (!t)
				goto next_page;

			qid = t->qid;
			buf = t->ptr;
			q = &mdev->q_rx[qid];
			dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
						SKB_WITH_OVERHEAD(q->buf_size),
						page_pool_get_dma_dir(q->page_pool));

			t->dma_addr = 0;
			t->ptr = NULL;
			mt76_put_rxwi(mdev, t);
			if (!buf)
				goto next_page;

			if (q->rx_head)
				data_len = q->buf_size;
			else
				data_len = SKB_WITH_OVERHEAD(q->buf_size);

			if (data_len < len + q->buf_offset) {
				dev_kfree_skb(q->rx_head);
				mt76_put_page_pool_buf(buf, false);
				q->rx_head = NULL;
				goto next_page;
			}

			ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK,
				       le32_to_cpu(rxd->data1));
			if (q->rx_head) {
				/* TODO: Take into account non-linear skb. */
				mt76_put_page_pool_buf(buf, false);
				if (ls) {
					dev_kfree_skb(q->rx_head);
					q->rx_head = NULL;
				}
				goto next_page;
			}

			if (ls && !mt7996_rx_check(mdev, buf, len))
				goto next_page;

			skb = build_skb(buf, q->buf_size);
			if (!skb)
				goto next_page;

			skb_reserve(skb, q->buf_offset);
			skb_mark_for_recycle(skb);
			__skb_put(skb, len);

			if (ind_reason == 1 || ind_reason == 2) {
				dev_kfree_skb(skb);
				goto next_page;
			}

			if (!ls) {
				q->rx_head = skb;
				goto next_page;
			}

			status = (struct mt76_rx_status *)skb->cb;
			if (seq_id != MT7996_RRO_MAX_SESSION)
				status->aggr = true;

			mt7996_queue_rx_skb(mdev, qid, skb, &info);
next_page:
			if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
				dma_addr =
					FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK,
						  le32_to_cpu(pinfo->data));
				dma_addr <<= 32;
				dma_addr |= le32_to_cpu(pinfo->pg_low);
#else
				dma_addr = le32_to_cpu(pinfo->pg_low);
#endif
				mt7996_msdu_page_put_to_cache(dev, p);
				p = NULL;
			}
		}

update_ack_seq_num:
		if ((i + 1) % 4 == 0)
			mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
				FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK,
					   seq_id) |
				FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK,
					   seq_num));
		if (p) {
			mt7996_msdu_page_put_to_cache(dev, p);
			p = NULL;
		}
	}

	/* Update ack_seq_num for remaining addr_elem */
	if (i % 4)
		mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
			FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) |
			FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num));
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

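/* Program the CCK/OFDM ACK timeout registers from the largest coverage
 * class configured across the active phys; the PLCP and CCA timeout fields
 * are widened by 3 * coverage_class on top of the baseline values.
 */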
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

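/* Compute the noise floor from the IRPI idle-power histogram: each bucket
 * counts samples at the level encoded in nf_power[], so the result is the
 * count-weighted average over all rx antenna chains, or 0 when no samples
 * were collected.
 */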
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

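/* phy->noise is a fixed-point accumulator with 4 fractional bits updated as
 * an exponential moving average: noise += nf - noise / 16. With a steady
 * nf of 92 it settles at 92 << 4, and state->noise then reads -92 dBm.
 */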
void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *link_conf;
	struct mt7996_phy *phy = priv;
	struct mt7996_dev *dev = phy->dev;
	unsigned int link_id;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		break;
	default:
		return;
	}

	for_each_vif_active_link(vif, link_conf, link_id) {
		struct mt7996_vif_link *link;

		link = mt7996_vif_link(dev, vif, link_id);
		if (!link || link->phy != phy)
			continue;

		mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf,
				      link_conf->enable_beacon);
	}
}

void mt7996_mac_update_beacons(struct mt7996_phy *phy)
{
	ieee80211_iterate_active_interfaces(phy->mt76->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy);
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	mt7996_mac_update_beacons(&dev->phy);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (phy2)
		mt7996_mac_update_beacons(phy2->priv);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (phy3)
		mt7996_mac_update_beacons(phy3->priv);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

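/* Full firmware/MAC restart: mask interrupts, quiesce tx scheduling, NAPI
 * and tx tokens, reset the DMA engine, then reload firmware and redo
 * EEPROM, MAC, txpower and beamforming init before restarting each phy.
 */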
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7996_phy *phy;
	int i, ret;

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt7996_for_each_phy(dev, phy)
		set_bit(MT76_RESET, &phy->mt76->state);
	wake_up(&dev->mt76.mcu.wait);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt7996_for_each_phy(dev, phy)
		mt76_txq_schedule_all(phy->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			local_bh_disable();
			napi_schedule(&dev->mt76.napi[i]);
			local_bh_enable();
		}
	}
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
	    mt7996_has_hwrro(dev)) {
		u32 wed_irq_mask = dev->mt76.mmio.irqmask |
				   MT_INT_TX_DONE_BAND2;

		mt7996_rro_hw_init(dev);
		mt76_for_each_q_rx(&dev->mt76, i) {
			if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
				mt76_queue_rx_reset(dev, i);
		}

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    false);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
			MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_for_each_phy(dev, phy)
		mt7996_init_txpower(phy);
	ret = mt7996_txbf_init(dev);
	if (ret)
		goto out;

	mt7996_for_each_phy(dev, phy) {
		if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
			continue;

		ret = mt7996_run(phy);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	mt7996_for_each_phy(dev, phy)
		clear_bit(MT76_RESET, &phy->mt76->state);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

static void
mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct mt7996_dev *dev = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(msta->link); i++) {
		struct mt7996_sta_link *msta_link = NULL;

		msta_link = rcu_replace_pointer(msta->link[i], msta_link,
						lockdep_is_held(&dev->mt76.mutex));
		if (!msta_link)
			continue;

		mt7996_mac_sta_deinit_link(dev, msta_link);

		if (msta->deflink_id == i) {
			msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
			continue;
		}

		kfree_rcu(msta_link, rcu_head);
	}
}

static void
mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;
	struct mt7996_dev *dev = data;
	int i;

	rcu_read_lock();
	for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
		mlink = mt76_dereference(mvif->link[i], &dev->mt76);
		if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv)
			continue;

		rcu_assign_pointer(mvif->link[i], NULL);
		kfree_rcu(mlink, rcu_head);
	}
	rcu_read_unlock();
}

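/* Chip-level recovery: retry mt7996_mac_restart() up to ten times, drop the
 * per-station and per-vif link state that the restart invalidated and hand
 * control back to mac80211 via ieee80211_restart_hw().
 */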
static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt7996_phy *phy;
	LIST_HEAD(list);
	int i;

	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(hw);

	cancel_work_sync(&dev->wed_rro.work);
	mt7996_for_each_phy(dev, phy)
		cancel_delayed_work_sync(&phy->mt76->mac_work);

	mt76_abort_scan(&dev->mt76);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	mt7996_for_each_phy(dev, phy)
		phy->omac_mask = 0;

	ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev);
	ieee80211_iterate_active_interfaces_atomic(hw,
						   IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
						   mt7996_mac_reset_vif_iter, dev);
	mt76_reset_device(&dev->mt76);

	INIT_LIST_HEAD(&dev->sta_rc_list);
	INIT_LIST_HEAD(&dev->twt_list);

	spin_lock_bh(&dev->wed_rro.lock);
	list_splice_init(&dev->wed_rro.poll_list, &list);
	spin_unlock_bh(&dev->wed_rro.lock);

	while (!list_empty(&list)) {
		struct mt7996_wed_rro_session_id *e;

		e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
				     list);
		list_del_init(&e->list);
		kfree(e);
	}

	i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
	dev->mt76.global_wcid.idx = i;
	dev->recovery.hw_full_reset = false;

	mutex_unlock(&dev->mt76.mutex);

	ieee80211_restart_hw(mt76_hw(dev));
}

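/* SER (system error recovery) handler. A WA/WM firmware watchdog event
 * escalates to a full chip restart; otherwise the lighter L1 path below
 * stops WED and DMA, walks the MCU reset handshake via MT_MCU_INT_EVENT
 * writes and state waits, and restarts the data path in place.
 */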
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct ieee80211_hw *hw;
	struct mt7996_dev *dev;
	struct mt7996_phy *phy;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	hw = mt76_hw(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
		 wiphy_name(hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt76_abort_scan(&dev->mt76);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	mt7996_for_each_phy(dev, phy) {
		mt76_abort_roc(phy->mt76);
		set_bit(MT76_RESET, &phy->mt76->state);
		cancel_delayed_work_sync(&phy->mt76->mac_work);
	}

	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt7996_npu_hw_stop(dev);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Rx/Tx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3)
		mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt7996_for_each_phy(dev, phy)
		clear_bit(MT76_RESET, &phy->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(hw);
	mt7996_update_beacons(dev);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_npu_hw_init(dev);

	mt7996_for_each_phy(dev, phy)
		ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "\n%s L1 SER recovery completed.",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}

void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct mt7996_sta_link *msta_link;
	struct ieee80211_vif *vif;
	struct mt7996_vif *mvif;
	LIST_HEAD(list);
	u32 changed;

	mutex_lock(&dev->mt76.mutex);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta_link = list_first_entry(&list, struct mt7996_sta_link,
					     rc_list);
		list_del_init(&msta_link->rc_list);

		changed = msta_link->changed;
		msta_link->changed = 0;
		mvif = msta_link->sta->vif;
		vif = container_of((void *)mvif, struct ieee80211_vif,
				   drv_priv);

		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
						 msta_link->wcid.link_id,
						 true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL,
						   msta_link->wcid.link_id,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	mutex_unlock(&dev->mt76.mutex);
}

void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int rdd_idx = mt7996_get_rdd_idx(phy, false);

	if (rdd_idx < 0)
		return;

	mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}

static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}

static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int err, rdd_idx;

	rdd_idx = mt7996_get_rdd_idx(phy, false);
	if (rdd_idx < 0)
		return -EINVAL;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, rdd_idx);

	return err;
}

static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

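/* DFS state machine: DISABLED, CAC and ACTIVE. Entering a DFS channel
 * programs the radar specs and starts CAC; RDD_CAC_END promotes the phy to
 * ACTIVE, while leaving the channel tears the detector down through
 * RDD_NORMAL_START.
 */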
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err, rdd_idx = mt7996_get_rdd_idx(phy, false);

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state || rdd_idx < 0)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

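/* TWT wake durations are negotiated in units of 256 us (see
 * mt7996_mac_check_twt_req()), so shifting left by eight converts a
 * duration field to microseconds on the TSF timeline.
 */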
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

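/* Insert @flow into the TSF-ordered TWT schedule at the first gap between
 * already-scheduled flows large enough to hold its service period; returns
 * the start TSF picked for the flow (0 when placed at the head).
 */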
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

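/* Validate a TWT setup request: only individual, implicit agreements using
 * 256 us duration units are accepted, and the wake interval
 * (mantissa << exponent) must be able to contain the whole wake duration,
 * e.g. a mantissa of 512 with exponent 10 gives a 524288 us interval.
 */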
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

static bool
mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta_link->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta_link->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

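/* Handle a TWT setup request from a station: pick a free flow id and
 * agreement table slot, place the flow on the TSF schedule and push the
 * agreement to the MCU. The response defaults to REJECT and is upgraded to
 * DICTATE (requested duration below MT7996_MIN_TWT_DUR) or ACCEPT.
 */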
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	struct mt7996_sta_link *msta_link = &msta->deflink;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta_link->twt.flowid_mask) ==
	    ARRAY_SIZE(msta_link->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
		goto unlock;

	flowid = ffs(~msta_link->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta_link->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta_link->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
				       MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta_link->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}

void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_vif_link *link,
				  struct mt7996_sta_link *msta_link,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
		return;

	if (!(msta_link->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta_link->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta_link->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}
