// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

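/* RCPI is reported in 0.5 dB steps with a +220 offset, so this conversion
 * yields dBm (an RCPI of 160, for example, maps to -30 dBm).
 */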
#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

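/* Resolve the mt76_wcid for an RX wlan index; non-unicast frames from a
 * known station are accounted to the owning vif's own wcid instead.
 */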
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7996_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7996_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

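/* Select the WTBL group of @wcid via WDUCR, then return the on-chip address
 * of LMAC WTBL dword @dw for that entry.
 */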
u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

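/* Drain the pending station poll list and refresh per-station airtime,
 * rate/GI flags and ack-frame RSSI from the hardware WTBL.
 */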
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	struct rate_info *rate;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];
		u8 bw;

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7996_sta, wcid.poll_list);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = msta->wcid.idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

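			/* request an admission-counter clear once bit 30 is
			 * seen, before the hardware counters can wrap
			 */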
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* We don't support reading GI info from txs packets.
		 * For accurate tx status reporting and AQL improvement,
		 * we need to make sure the flags match, so we poll GI
		 * from the per-sta counters directly.
		 */
		rate = &msta->wcid.rate;

		switch (rate->bw) {
		case RATE_INFO_BW_320:
			bw = IEEE80211_STA_RX_BW_320;
			break;
		case RATE_INFO_BW_160:
			bw = IEEE80211_STA_RX_BW_160;
			break;
		case RATE_INFO_BW_80:
			bw = IEEE80211_STA_RX_BW_80;
			break;
		case RATE_INFO_BW_40:
			bw = IEEE80211_STA_RX_BW_40;
			break;
		default:
			bw = IEEE80211_STA_RX_BW_20;
			break;
		}

		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
		val = mt76_rr(dev, addr);
		if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
			addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 5);
			val = mt76_rr(dev, addr);
			rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
		} else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
			u8 offs = 24 + 2 * bw;

			rate->he_gi = (val & (0x3 << offs)) >> offs;
		} else if (rate->flags &
			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
			if (val & BIT(12 + bw))
				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
			else
				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		msta->ack_signal =
			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
	}

	rcu_read_unlock();
}

void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
	if (enable)
		mt76_set(dev, addr, BIT(5));
	else
		mt76_clear(dev, addr, BIT(5));
}

void mt7996_mac_set_fixed_rate_table(struct mt7996_dev *dev,
				     u8 tbl_idx, u16 rate_idx)
{
	u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx;

	mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
	/* use wtbl spe idx */
	mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
	mt76_wr(dev, MT_WTBL_ITCR, ctrl);
}

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

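/* Decode the PHY RX vector (P-RXV) words into the mac80211 rate, bandwidth
 * and guard-interval fields of @status.
 */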
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	case IEEE80211_STA_RX_BW_320:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

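/* Parse the RX descriptor and its optional groups into struct mt76_rx_status;
 * a negative return value tells the caller to drop the frame.
 */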
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7996_sta *msta;

		msta = container_of(status->wcid, struct mt7996_sta, wcid);
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&msta->wcid.poll_list))
			list_add_tail(&msta->wcid.poll_list,
				      &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so the A-MSDU-present bit needs to be
			 * cleared here to mark this as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}

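/* Fill the common part of the TX descriptor (TXWI); the 802.3 and 802.11
 * specific fields are filled by the helpers above.
 */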
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif *mvif;
	u16 tx_count = 15;
	u32 val;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED));
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));

	mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
	      FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags =
		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->fw.bss_idx = mvif->mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7996_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, txwi);
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

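/* Process a TXRX_NOTIFY (tx-free) event: release the tokens of completed
 * MSDUs and queue their owners for airtime polling.
 */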
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
#if defined(__linux__)
	void *end = data + len;
#elif defined(__FreeBSD__)
	void *end = (u8 *)data + len;
#endif
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&msta->wcid.poll_list))
				list_add_tail(&msta->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER)
			continue;

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

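/* Complete a tx-status report for a tracked skb and derive the station's
 * last tx rate from the TXS words.
 */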
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	if (list_empty(&msta->wcid.poll_list))
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

out:
	rcu_read_unlock();
}

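/* Pre-check an RX buffer outside the normal RX path: consume tx-free, TXS
 * and fw-monitor events in place and report whether the buffer still needs
 * full RX handling.
 */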
bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

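/* The timeout offset is derived from the largest coverage class of all
 * active phys; each class adds 3 units to the PLCP and CCA timeout values.
 */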
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

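/* Estimate the noise floor from the per-antenna IRPI histograms as a
 * bucket-count weighted average of the nf_power levels (in -dBm).
 */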
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

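/* Full-chip restart: quiesce DMA and napi, reload firmware and eeprom, then
 * bring all previously running phys back up.
 */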
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		set_bit(MT76_MCU_RESET, &phy2->mt76->state);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		set_bit(MT76_MCU_RESET, &phy3->mt76->state);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(dev, &dev->mphy.sband_2g.sband);
	mt7996_init_txpower(dev, &dev->mphy.sband_5g.sband);
	mt7996_init_txpower(dev, &dev->mphy.sband_6g.sband);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2->mt76->hw);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3->mt76->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
1867 mt7996_dma_start(dev, false);
1868
1869 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1870 clear_bit(MT76_RESET, &dev->mphy.state);
1871 if (phy2)
1872 clear_bit(MT76_RESET, &phy2->mt76->state);
1873 if (phy3)
1874 clear_bit(MT76_RESET, &phy3->mt76->state);
1875
1876 local_bh_disable();
1877 mt76_for_each_q_rx(&dev->mt76, i) {
1878 napi_enable(&dev->mt76.napi[i]);
1879 napi_schedule(&dev->mt76.napi[i]);
1880 }
1881 local_bh_enable();
1882
1883 tasklet_schedule(&dev->mt76.irq_tasklet);
1884
1885 mt76_worker_enable(&dev->mt76.tx_worker);
1886
1887 local_bh_disable();
1888 napi_enable(&dev->mt76.tx_napi);
1889 napi_schedule(&dev->mt76.tx_napi);
1890 local_bh_enable();
1891
1892 ieee80211_wake_queues(mt76_hw(dev));
1893 if (phy2)
1894 ieee80211_wake_queues(phy2->mt76->hw);
1895 if (phy3)
1896 ieee80211_wake_queues(phy3->mt76->hw);
1897
1898 mutex_unlock(&dev->mt76.mutex);
1899
1900 mt7996_update_beacons(dev);
1901
1902 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1903 MT7996_WATCHDOG_TIME);
1904 if (phy2)
1905 ieee80211_queue_delayed_work(phy2->mt76->hw,
1906 &phy2->mt76->mac_work,
1907 MT7996_WATCHDOG_TIME);
1908 if (phy3)
1909 ieee80211_queue_delayed_work(phy3->mt76->hw,
1910 &phy3->mt76->mac_work,
1911 MT7996_WATCHDOG_TIME);
1912 dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
1913 wiphy_name(dev->mt76.hw->wiphy));
1914 }
1915
1916 /* firmware coredump */
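/* Snapshot the firmware memory regions into the coredump buffer and hand
 * the result to the coredump facility, then kick the actual reset work.
 * Each dumped region is prefixed with a small header recording its start
 * address and length.
 */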
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

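/* SER entry point, valid once hardware init has completed. A WM/WA
 * watchdog exception (firmware crash) triggers a coredump followed by a
 * full restart; anything else goes through the partial L1 recovery path in
 * mt7996_mac_reset_work().
 */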
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

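/* Read the per-band hardware MIB counters and fold them into the software
 * statistics; called periodically from the mac work, so most mib fields
 * hold running totals across reads.
 */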
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}

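/* Drain the list of stations whose rate-control parameters changed and push
 * the updated rate/NSS/bandwidth settings to the MCU. The poll lock is
 * dropped around the MCU call, which may sleep.
 */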
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		/* TODO: smps change */

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}

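/* Periodic per-phy watchdog: refresh the channel survey, update the MIB
 * statistics every fifth run, check pending tx status, and re-arm itself
 * after MT7996_WATCHDOG_TIME.
 */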
void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

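/* Stop the radar detector on whichever chains this phy started, as tracked
 * by the rdd_state chain bitmask.
 */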
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
				   MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
				   MT_RX_SEL0, 0);
}

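/* Start radar detection on one chain: map the regulatory DFS region to the
 * MCU encoding (0 = ETSI, 1 = FCC, 2 = JP), issue RDD_START and then enable
 * the detection mode.
 */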
static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
				 MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

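/* Begin the channel availability check (CAC) and start the radar detector
 * on the band's own chain; 160 MHz and 80+80 MHz channels additionally
 * enable the second chain.
 */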
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int err;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(band_idx);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7996_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

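/* Program the region-specific radar signatures into the firmware: one
 * threshold entry per pattern in the selected table, followed by the common
 * pulse thresholds.
 */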
static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

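/* Drive the per-phy DFS state machine (UNKNOWN/DISABLED/CAC/ACTIVE) on
 * channel or regulatory changes: (re)program the radar specs and start CAC
 * when detection becomes required, signal CAC end once the channel becomes
 * usable, and fall back to normal rx when DFS is no longer needed.
 */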
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

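/* TWT wake durations are expressed in units of 256 us, so shifting left by
 * 8 converts a duration field into microseconds, the unit of the TSF.
 */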
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

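/* Insert a new TWT flow into the TSF-ordered schedule list, looking for the
 * first gap between already-scheduled flows that can fit the flow's aligned
 * duration. Returns the start TSF chosen for the flow, or 0 when the flow
 * becomes the head of the list.
 */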
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

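/* Validate a TWT setup request against what is supported here: only
 * individual (non-broadcast) implicit agreements with 256 us wake-duration
 * units, and a wake interval at least as long as the wake duration.
 */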
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

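/* Handle a TWT setup request from a station: allocate a flow id and an
 * agreement table slot, fill in the flow from the request fields, pick a
 * start TSF on the schedule list for REQUEST/SUGGEST commands, and push the
 * agreement to the MCU. The setup command written back into the request is
 * ACCEPT on success and REJECT otherwise.
 */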
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	le16p_replace_bits(&twt_agrt->req_type, flowid,
			   IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}

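/* Tear down a single TWT flow: tell the MCU to delete the agreement, then
 * release the flow id and agreement table slot. The caller must hold
 * dev->mt76.mutex, as asserted by lockdep.
 */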
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_sta *msta,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}
