xref: /linux/drivers/net/wireless/mediatek/mt76/mt7996/mac.c (revision 9f2c9170934eace462499ba0bfe042cc72900173)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2022 MediaTek Inc.
4  */
5 
6 #include <linux/etherdevice.h>
7 #include <linux/timekeeping.h>
8 #include "mt7996.h"
9 #include "../dma.h"
10 #include "mac.h"
11 #include "mcu.h"
12 
13 #define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)
14 
15 #define HE_BITS(f)		cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
16 #define HE_PREP(f, m, v)	le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
17 						 IEEE80211_RADIOTAP_HE_##f)
18 
/* ETSI DFS radar detection parameters: global pulse thresholds plus the
 * per-slot pulse/chirp patterns loaded into the detector (slot indices
 * match the firmware radar pattern table).
 */
static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { },  54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { },  54 },
	},
};
32 
/* FCC DFS radar detection parameters (same layout as the ETSI table) */
static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};
43 
/* JP (MKK) DFS radar detection parameters: FCC-like patterns in slots
 * 0-4 plus the Japan-specific patterns in slots 13-15.
 */
static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};
57 
58 static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
59 					    u16 idx, bool unicast)
60 {
61 	struct mt7996_sta *sta;
62 	struct mt76_wcid *wcid;
63 
64 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
65 		return NULL;
66 
67 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
68 	if (unicast || !wcid)
69 		return wcid;
70 
71 	if (!wcid->sta)
72 		return NULL;
73 
74 	sta = container_of(wcid, struct mt7996_sta, wcid);
75 	if (!sta->vif)
76 		return NULL;
77 
78 	return &sta->vif->sta.wcid;
79 }
80 
/* mt76 station power-save transition hook: intentionally left empty for
 * mt7996 (no driver-side action is taken on PS state changes here).
 */
void mt7996_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
84 
85 bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
86 {
87 	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
88 		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
89 
90 	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
91 			 0, 5000);
92 }
93 
94 u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
95 {
96 	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
97 		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
98 
99 	return MT_WTBL_LMAC_OFFS(wcid, dw);
100 }
101 
102 static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
103 {
104 	static const u8 ac_to_tid[] = {
105 		[IEEE80211_AC_BE] = 0,
106 		[IEEE80211_AC_BK] = 1,
107 		[IEEE80211_AC_VI] = 4,
108 		[IEEE80211_AC_VO] = 6
109 	};
110 	struct ieee80211_sta *sta;
111 	struct mt7996_sta *msta;
112 	struct rate_info *rate;
113 	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
114 	LIST_HEAD(sta_poll_list);
115 	int i;
116 
117 	spin_lock_bh(&dev->sta_poll_lock);
118 	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
119 	spin_unlock_bh(&dev->sta_poll_lock);
120 
121 	rcu_read_lock();
122 
123 	while (true) {
124 		bool clear = false;
125 		u32 addr, val;
126 		u16 idx;
127 		s8 rssi[4];
128 		u8 bw;
129 
130 		spin_lock_bh(&dev->sta_poll_lock);
131 		if (list_empty(&sta_poll_list)) {
132 			spin_unlock_bh(&dev->sta_poll_lock);
133 			break;
134 		}
135 		msta = list_first_entry(&sta_poll_list,
136 					struct mt7996_sta, poll_list);
137 		list_del_init(&msta->poll_list);
138 		spin_unlock_bh(&dev->sta_poll_lock);
139 
140 		idx = msta->wcid.idx;
141 
142 		/* refresh peer's airtime reporting */
143 		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
144 
145 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
146 			u32 tx_last = msta->airtime_ac[i];
147 			u32 rx_last = msta->airtime_ac[i + 4];
148 
149 			msta->airtime_ac[i] = mt76_rr(dev, addr);
150 			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
151 
152 			tx_time[i] = msta->airtime_ac[i] - tx_last;
153 			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
154 
155 			if ((tx_last | rx_last) & BIT(30))
156 				clear = true;
157 
158 			addr += 8;
159 		}
160 
161 		if (clear) {
162 			mt7996_mac_wtbl_update(dev, idx,
163 					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
164 			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
165 		}
166 
167 		if (!msta->wcid.sta)
168 			continue;
169 
170 		sta = container_of((void *)msta, struct ieee80211_sta,
171 				   drv_priv);
172 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
173 			u8 q = mt76_connac_lmac_mapping(i);
174 			u32 tx_cur = tx_time[q];
175 			u32 rx_cur = rx_time[q];
176 			u8 tid = ac_to_tid[i];
177 
178 			if (!tx_cur && !rx_cur)
179 				continue;
180 
181 			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
182 		}
183 
184 		/* We don't support reading GI info from txs packets.
185 		 * For accurate tx status reporting and AQL improvement,
186 		 * we need to make sure that flags match so polling GI
187 		 * from per-sta counters directly.
188 		 */
189 		rate = &msta->wcid.rate;
190 
191 		switch (rate->bw) {
192 		case RATE_INFO_BW_160:
193 			bw = IEEE80211_STA_RX_BW_160;
194 			break;
195 		case RATE_INFO_BW_80:
196 			bw = IEEE80211_STA_RX_BW_80;
197 			break;
198 		case RATE_INFO_BW_40:
199 			bw = IEEE80211_STA_RX_BW_40;
200 			break;
201 		default:
202 			bw = IEEE80211_STA_RX_BW_20;
203 			break;
204 		}
205 
206 		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
207 		val = mt76_rr(dev, addr);
208 		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
209 			u8 offs = 24 + 2 * bw;
210 
211 			rate->he_gi = (val & (0x3 << offs)) >> offs;
212 		} else if (rate->flags &
213 			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
214 			if (val & BIT(12 + bw))
215 				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
216 			else
217 				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
218 		}
219 
220 		/* get signal strength of resp frames (CTS/BA/ACK) */
221 		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
222 		val = mt76_rr(dev, addr);
223 
224 		rssi[0] = to_rssi(GENMASK(7, 0), val);
225 		rssi[1] = to_rssi(GENMASK(15, 8), val);
226 		rssi[2] = to_rssi(GENMASK(23, 16), val);
227 		rssi[3] = to_rssi(GENMASK(31, 14), val);
228 
229 		msta->ack_signal =
230 			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
231 
232 		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
233 	}
234 
235 	rcu_read_unlock();
236 }
237 
238 void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
239 			      struct ieee80211_vif *vif, bool enable)
240 {
241 	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
242 	u32 addr;
243 
244 	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
245 	if (enable)
246 		mt76_set(dev, addr, BIT(5));
247 	else
248 		mt76_clear(dev, addr, BIT(5));
249 }
250 
/* Fill the HE radiotap RU allocation fields from the P-RXV words.
 *
 * The RU index is split across two RXV words (low nibble in rxv[0],
 * high bits in rxv[1]); the combined 7-bit value is mapped to an
 * nl80211 RU size bucket plus an offset within that bucket, following
 * the 802.11ax RU allocation numbering.
 */
static void
mt7996_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
				 struct ieee80211_radiotap_he *he,
				 __le32 *rxv)
{
	u32 ru_h, ru_l;
	u8 ru, offs = 0;

	ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
	ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
	ru = (u8)(ru_l | ru_h << 4);

	status->bw = RATE_INFO_BW_HE_RU;

	/* map the raw RU index onto (RU size, offset within size class) */
	switch (ru) {
	case 0 ... 36:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
		offs = ru;
		break;
	case 37 ... 52:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
		offs = ru - 37;
		break;
	case 53 ... 60:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
		offs = ru - 53;
		break;
	case 61 ... 64:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
		offs = ru - 61;
		break;
	case 65 ... 66:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
		offs = ru - 65;
		break;
	case 67:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
		break;
	case 68:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
		break;
	}

	he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
	he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
		     le16_encode_bits(offs,
				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
}
299 
/* Prepend an ieee80211_radiotap_he_mu header to @skb, filled from the
 * C-RXV words, and flag the rx status so mac80211 consumes it.
 * Channel-2 / secondary RU fields are only valid (and only filled) for
 * bandwidths of 40 MHz and above.
 */
static void
mt7996_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	/* template with the "known" bits pre-set for the fields below */
	static const struct ieee80211_radiotap_he_mu mu_known = {
		.flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
			  HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
			  HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
			  HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
		.flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
	};
	struct ieee80211_radiotap_he_mu *he_mu = NULL;

	status->flag |= RX_FLAG_RADIOTAP_HE_MU;

	he_mu = skb_push(skb, sizeof(mu_known));
	memcpy(he_mu, &mu_known, sizeof(mu_known));

#define MU_PREP(f, v)	le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)

	he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
	if (status->he_dcm)
		he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);

	he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
			 MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
				 le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));

	he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);

	if (status->bw >= RATE_INFO_BW_40) {
		he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
		he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
	}

	if (status->bw >= RATE_INFO_BW_80) {
		he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
		he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
	}
}
340 
/* Prepend an ieee80211_radiotap_he header to @skb, filled from the RXV
 * words, and set RX_FLAG_RADIOTAP_HE.  @mode (MT_PHY_TYPE_HE_*) selects
 * which format-specific fields (SU/EXT-SU/MU/trigger-based) are added;
 * MU frames additionally get the HE-MU radiotap header and RU info.
 */
static void
mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	/* template with the "known" bits for fields common to all modes */
	static const struct ieee80211_radiotap_he known = {
		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
			 HE_BITS(DATA1_STBC_KNOWN) |
			 HE_BITS(DATA1_CODING_KNOWN) |
			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
			 HE_BITS(DATA1_DOPPLER_KNOWN) |
			 HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
		.data2 = HE_BITS(DATA2_GI_KNOWN) |
			 HE_BITS(DATA2_TXBF_KNOWN) |
			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
			 HE_BITS(DATA2_TXOP_KNOWN),
	};
	struct ieee80211_radiotap_he *he = NULL;
	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;

	status->flag |= RX_FLAG_RADIOTAP_HE;

	he = skb_push(skb, sizeof(known));
	memcpy(he, &known, sizeof(known));

	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
	he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
		    le16_encode_bits(ltf_size,
				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
	if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
		he->data5 |= HE_BITS(DATA5_TXBF);
	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);

	/* mode-specific format flag plus the fields valid for that format */
	switch (mode) {
	case MT_PHY_TYPE_HE_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
			     HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);

		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_EXT_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_MU:
		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
			     HE_BITS(DATA1_UL_DL_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);

		mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
		mt7996_mac_decode_he_mu_radiotap(skb, rxv);
		break;
	case MT_PHY_TYPE_HE_TB:
		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);

		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);

		mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	default:
		break;
	}
}
422 
/* The HW does not translate the mac header to 802.3 for mesh point */
/* Undo the hardware 802.11 -> 802.3 header translation for the first
 * fragment: rebuild an 802.11 header (plus QoS/HT control fields when
 * present in the frame control) from the RXD words and the translated
 * ethernet header at @hdr_gap, then re-insert the appropriate LLC/SNAP
 * encapsulation.  Returns 0 on success, -EINVAL when the frame is not a
 * translatable unicast data frame or the needed RXD group is missing.
 */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	/* only unicast-to-me frames can be reverse-translated */
	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	/* RXD group 4 carries the original frame/seq control words */
	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	/* addr3/addr4 depend on the ToDS/FromDS combination */
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		break;
	}

	/* drop the ethernet header, then restore LLC/SNAP (or raw 802.3) */
	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	/* re-insert HT control and QoS control when the FC says they exist */
	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	/* 3-address headers omit addr4 (the last 6 bytes of struct hdr) */
	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
502 
503 static int
504 mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
505 			struct mt76_rx_status *status,
506 			struct ieee80211_supported_band *sband,
507 			__le32 *rxv, u8 *mode)
508 {
509 	u32 v0, v2;
510 	u8 stbc, gi, bw, dcm, nss;
511 	int i, idx;
512 	bool cck = false;
513 
514 	v0 = le32_to_cpu(rxv[0]);
515 	v2 = le32_to_cpu(rxv[2]);
516 
517 	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
518 	i = idx;
519 	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
520 
521 	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
522 	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
523 	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
524 	dcm = FIELD_GET(MT_PRXV_DCM, v2);
525 	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);
526 
527 	switch (*mode) {
528 	case MT_PHY_TYPE_CCK:
529 		cck = true;
530 		fallthrough;
531 	case MT_PHY_TYPE_OFDM:
532 		i = mt76_get_rate(&dev->mt76, sband, i, cck);
533 		break;
534 	case MT_PHY_TYPE_HT_GF:
535 	case MT_PHY_TYPE_HT:
536 		status->encoding = RX_ENC_HT;
537 		if (gi)
538 			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
539 		if (i > 31)
540 			return -EINVAL;
541 		break;
542 	case MT_PHY_TYPE_VHT:
543 		status->nss = nss;
544 		status->encoding = RX_ENC_VHT;
545 		if (gi)
546 			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
547 		if (i > 11)
548 			return -EINVAL;
549 		break;
550 	case MT_PHY_TYPE_HE_MU:
551 	case MT_PHY_TYPE_HE_SU:
552 	case MT_PHY_TYPE_HE_EXT_SU:
553 	case MT_PHY_TYPE_HE_TB:
554 		status->nss = nss;
555 		status->encoding = RX_ENC_HE;
556 		i &= GENMASK(3, 0);
557 
558 		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
559 			status->he_gi = gi;
560 
561 		status->he_dcm = dcm;
562 		break;
563 	default:
564 		return -EINVAL;
565 	}
566 	status->rate_idx = i;
567 
568 	switch (bw) {
569 	case IEEE80211_STA_RX_BW_20:
570 		break;
571 	case IEEE80211_STA_RX_BW_40:
572 		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
573 		    (idx & MT_PRXV_TX_ER_SU_106T)) {
574 			status->bw = RATE_INFO_BW_HE_RU;
575 			status->he_ru =
576 				NL80211_RATE_INFO_HE_RU_ALLOC_106;
577 		} else {
578 			status->bw = RATE_INFO_BW_40;
579 		}
580 		break;
581 	case IEEE80211_STA_RX_BW_80:
582 		status->bw = RATE_INFO_BW_80;
583 		break;
584 	case IEEE80211_STA_RX_BW_160:
585 		status->bw = RATE_INFO_BW_160;
586 		break;
587 	default:
588 		return -EINVAL;
589 	}
590 
591 	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
592 	if (*mode < MT_PHY_TYPE_HE_SU && gi)
593 		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
594 
595 	return 0;
596 }
597 
/* Parse a received frame's RX descriptor (RXD) and the optional RXD
 * groups/RXV into the mt76_rx_status embedded in skb->cb, strip the
 * descriptor and padding from the skb, and (for mesh) reverse the
 * hardware header translation.  Returns 0 on success or -EINVAL when
 * the descriptor is malformed/truncated or the frame must be dropped.
 */
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	/* capture hw checksum info stashed in skb->cb before the memset
	 * below wipes it (status and csum_status alias the same cb area)
	 */
	u32 csum_status = *(u32 *)skb->cb;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	/* route the frame to the phy of the band it was received on */
	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7996_sta *msta;

		/* queue the station for the periodic counter poll */
		msta = container_of(status->wcid, struct mt7996_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* NOTE(review): an RXD3 mask is tested against rxd1 here — confirm
	 * against the mt7996 RXD layout that the FCS error bit really
	 * lives in DW1
	 */
	if (rxd1 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* frame decrypted by hw with the IV/MIC already stripped */
	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* walk the optional RXD groups; each presence flag adds 4 dwords
	 * (except group 5), and every advance is bounds-checked against
	 * the skb length
	 */
	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		/* group 4: original 802.11 frame/QoS/sequence control */
		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		/* group 1: crypto IV/PN, stored byte-reversed for mac80211 */
		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		/* group 2: hw timestamp, used to tie A-MPDU subframes
		 * together via a shared ampdu_ref
		 */
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		/* per-chain RCPI -> dBm signal values */
		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* strip the descriptor (and hw alignment pad) from the payload */
	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		/* fragmented frames must be delivered with an 802.11 header */
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt7996_mac_decode_he_radiotap(skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}
858 
859 static void
860 mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
861 			   struct sk_buff *skb, struct mt76_wcid *wcid)
862 {
863 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
864 	u8 fc_type, fc_stype;
865 	u16 ethertype;
866 	bool wmm = false;
867 	u32 val;
868 
869 	if (wcid->sta) {
870 		struct ieee80211_sta *sta;
871 
872 		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
873 		wmm = sta->wme;
874 	}
875 
876 	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
877 	      FIELD_PREP(MT_TXD1_TID, tid);
878 
879 	ethertype = get_unaligned_be16(&skb->data[12]);
880 	if (ethertype >= ETH_P_802_3_MIN)
881 		val |= MT_TXD1_ETH_802_3;
882 
883 	txwi[1] |= cpu_to_le32(val);
884 
885 	fc_type = IEEE80211_FTYPE_DATA >> 2;
886 	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
887 
888 	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
889 	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
890 
891 	txwi[2] |= cpu_to_le32(val);
892 }
893 
/* Fill the 802.11 specific TXD fields from the frame header: header
 * format/length and TID in TXD1, frame type/subtype in TXD2, and the
 * bcast/beacon/sequence-number controls in TXD3.
 */
static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* ADDBA requests and other management frames use dedicated tids */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	/* hdr length is encoded in 16-bit words */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	/* non-data, multicast and minrate frames bypass rate control */
	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	/* BIP-protected group management frames: flag BIP, no hw PN */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		/* beacons: no PS buffering, max remaining tx count */
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	/* injected frames keep their own sequence numbers (and BAR frames
	 * use the BA start sequence); disable hw A-MSDU aggregation then
	 */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}
961 
/* Compute the fixed-rate TXD value (rate index + phy mode) for a frame.
 *
 * For beacons a rate is taken from the user-configured beacon_tx_rate
 * mask when exactly one HE/VHT/HT/legacy rate is selected; otherwise
 * the multicast rate (for @mcast) or the lowest basic rate is used,
 * mapped through mt76_calculate_default_rate() which packs the mode in
 * bits 15:8 and the rate index in bits 7:0.
 */
static u16
mt7996_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
		       bool beacon, bool mcast)
{
	u8 mode = 0, band = mphy->chandef.chan->band;
	int rateidx = 0, mcast_rate;

	if (beacon) {
		struct cfg80211_bitrate_mask *mask;

		/* honor a single-rate beacon mask, preferring newer modes */
		mask = &vif->bss_conf.beacon_tx_rate;
		if (hweight16(mask->control[band].he_mcs[0]) == 1) {
			rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
			mode = MT_PHY_TYPE_HE_SU;
			goto out;
		} else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
			rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
			mode = MT_PHY_TYPE_VHT;
			goto out;
		} else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
			rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
			mode = MT_PHY_TYPE_HT;
			goto out;
		} else if (hweight32(mask->control[band].legacy) == 1) {
			rateidx = ffs(mask->control[band].legacy) - 1;
			goto legacy;
		}
	}

	mcast_rate = vif->bss_conf.mcast_rate[band];
	if (mcast && mcast_rate > 0)
		rateidx = mcast_rate - 1;
	else
		rateidx = ffs(vif->bss_conf.basic_rates) - 1;

legacy:
	rateidx = mt76_calculate_default_rate(mphy, rateidx);
	mode = rateidx >> 8;
	rateidx &= GENMASK(7, 0);

out:
	return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
	       FIELD_PREP(MT_TX_RATE_MODE, mode);
}
1006 
/* Fill the hardware TX descriptor (txwi) for @skb.
 *
 * @pid: packet id used to match TX status reports (MT_PACKET_ID_FIRST and
 *	 above request a status report from the firmware)
 * @key: hw key used to set the protected-frame bit, may be NULL
 * @changed: BSS_CHANGED_* bits; used to detect beacon / in-band discovery
 *	     frames, which take the firmware TX path
 */
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
			   struct ieee80211_key_conf *key, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 tx_count = 15;
	u32 val;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED));
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));

	if (vif) {
		/* the vif overrides the hw_queue-derived band index */
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		omac_idx = mvif->mt76.omac_idx;
		wmm_idx = mvif->mt76.wmm_idx;
		band_idx = mvif->mt76.band_idx;
	}

	mphy = mt76_dev_phy(&dev->mt76, band_idx);

	/* beacons and in-band discovery frames are sent by the firmware
	 * (MT_TX_TYPE_FW); PSD frames use the ALTX queue, everything else
	 * the per-AC LMAC queue of this vif's WMM set.
	 */
	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
	      FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	/* fill the frame-format specific descriptor fields */
	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		/* Fixed rate is available just for 802.11 txd */
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool multicast = is_multicast_ether_addr(hdr->addr1);
		u16 rate = mt7996_mac_tx_rate_val(mphy, vif, beacon, multicast);

		/* fix to bw 20 */
		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, 0) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);

		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
1105 
/* Prepare a frame for DMA: allocate a tx token, optionally fill the txwi,
 * and build the TXP (cut-through) fragment descriptor consumed by the
 * firmware.  Returns 0 on success or a negative error code.
 */
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_txwi_cache *t;
	struct mt7996_txp *txp;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;

		/* request a tx status report at most ~4 times per second
		 * per station, used to keep rate statistics fresh
		 */
		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	/* the txwi cache entry lives right behind the hw descriptor */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* Transmit non qos data by 802.11 header and need to fill txd by host */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid,
				      key, 0);

	/* record all DMA fragments except buf[0] (the descriptor itself) */
	txp = (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

	/* tell the firmware whether the host-filled txd is valid */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		txp->flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->bss_idx = mvif->mt76.idx;
	}

	txp->token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0xfff);
	/* real skb is recovered via the token in the completion path */
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
1189 
static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	/* Kick off a BlockAck session for the TID of a completed QoS data
	 * frame if the peer supports aggregation and none is active yet.
	 */
	struct mt7996_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	/* reconstruct the frame_control ftype/stype bits from the txwi */
	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	/* ampdu_state bit doubles as "session requested" latch */
	msta = (struct mt7996_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}
1214 
1215 static void
1216 mt7996_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
1217 {
1218 	struct mt7996_txp *txp;
1219 	int i;
1220 
1221 	txp = mt7996_txwi_to_txp(dev, t);
1222 	for (i = 0; i < txp->nbuf; i++)
1223 		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
1224 				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
1225 }
1226 
static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	/* Release one txwi cache entry: unmap its DMA buffers, complete the
	 * attached skb (if any) and return the entry to the free pool.
	 * @free_list: optional list to defer skb freeing to the caller.
	 */
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt7996_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		/* NOTE(review): cast assumes the mt76_wcid is the first
		 * member of the driver sta struct — confirm against
		 * struct mt7996_sta.
		 */
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		/* don't trigger BA sessions from EAPOL completions */
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, txwi);
	} else {
		/* no sta context: recover the wcid index from the txwi */
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
1257 
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	/* Parse a TXRX_NOTIFY (tx-free) event from the firmware: release
	 * the tokens/skbs of all completed MSDUs and queue the affected
	 * stations for airtime polling.
	 */
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	/* only event format version >= 4 is supported here */
	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		/* bounds check before dereferencing the next event dword */
		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			/* sta stays set for the msdu entries that follow */
			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER)
			continue;

		/* each dword can carry up to two 15-bit msdu ids */
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	/* unblock tx if releasing tokens freed up queue space */
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
1347 
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
		       __le32 *txs_data, struct mt76_sta_stats *stats)
{
	/* Process one TXS (tx status) record: complete the matching status
	 * skb and decode the reported rate into @wcid->rate and @stats.
	 * Returns true if a matching skb was found.
	 */
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	/* with STBC the hardware reports the doubled stream count */
	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	/* translate the hw rate encoding into a cfg80211 rate_info */
	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
1471 
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	/* Dispatch a single TXS record to the station identified by the
	 * wcid embedded in the record, and queue it for polling.
	 */
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	/* only TXS formats 0/1 are handled */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	/* pids below MT_PACKET_ID_FIRST never requested a status report */
	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= MT7996_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}
1513 
1514 bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
1515 {
1516 	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1517 	__le32 *rxd = (__le32 *)data;
1518 	__le32 *end = (__le32 *)&rxd[len / 4];
1519 	enum rx_pkt_type type;
1520 
1521 	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1522 	if (type != PKT_TYPE_NORMAL) {
1523 		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
1524 
1525 		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
1526 			     MT_RXD0_SW_PKT_TYPE_FRAME))
1527 			return true;
1528 	}
1529 
1530 	switch (type) {
1531 	case PKT_TYPE_TXRX_NOTIFY:
1532 		mt7996_mac_tx_free(dev, data, len);
1533 		return false;
1534 	case PKT_TYPE_TXS:
1535 		for (rxd += 4; rxd + 8 <= end; rxd += 8)
1536 			mt7996_mac_add_txs(dev, rxd);
1537 		return false;
1538 	case PKT_TYPE_RX_FW_MONITOR:
1539 		mt7996_debugfs_rx_fw_monitor(dev, data, len);
1540 		return false;
1541 	default:
1542 		return true;
1543 	}
1544 }
1545 
/* Main rx dispatch: route an incoming buffer to the tx-free handler, the
 * MCU event handler, the TXS parser, or the normal rx path.  Ownership of
 * @skb is consumed in all branches.
 */
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* SW-defined frame subtypes take the normal rx path */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		/* mt7996_mcu_rx_event() consumes the skb */
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		/* 8-dword TXS records follow a 4-dword header */
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, skb)) {
			/* hand the frame to mac80211 */
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
1591 
1592 void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
1593 {
1594 	if (!e->txwi) {
1595 		dev_kfree_skb_any(e->skb);
1596 		return;
1597 	}
1598 
1599 	/* error path */
1600 	if (e->skb == DMA_DUMMY_DATA) {
1601 		struct mt76_txwi_cache *t;
1602 		struct mt7996_txp *txp;
1603 
1604 		txp = mt7996_txwi_to_txp(mdev, e->txwi);
1605 		t = mt76_token_put(mdev, le16_to_cpu(txp->token));
1606 		e->skb = t ? t->skb : NULL;
1607 	}
1608 
1609 	if (e->skb)
1610 		mt76_tx_complete_skb(mdev, e->wcid, e->skb);
1611 }
1612 
/* Reset the per-band CCA statistics engine by toggling its enable bit. */
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	/* NOTE(review): magic bits 11/9 re-arm the counter block — confirm
	 * against the STSCNT_EN field definition.
	 */
	mt76_set(dev, reg, BIT(11) | BIT(9));
}
1621 
/* Clear all software and hardware statistics for one band. */
void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	/* drain the aggregation counters (presumably clear-on-read
	 * registers — TODO confirm); values are discarded
	 */
	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}
1641 
/* Program MAC timing (CCA/PLCP timeouts, IFS, CF-End rate) for one band,
 * extending the timeouts by the largest coverage class of all phys.
 */
void mt7996_mac_set_timing(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* timing must satisfy the worst coverage class across bands */
	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	/* quiesce tx/rx while the timing registers are rewritten */
	mt76_set(dev, MT_ARB_SCR(band_idx),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	/* 3 us of extra timeout per coverage class step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(band_idx),
		FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, 10) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (!a_band)
		mt76_wr(dev, MT_TMAC_ICR1(band_idx),
			FIELD_PREP(MT_IFS_EIFS_CCK, 314));

	/* CF-End uses an 11b rate only for long-slot 2.4 GHz operation */
	if (phy->slottime < 20 || a_band)
		val = MT7996_CFEND_RATE_DEFAULT;
	else
		val = MT7996_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(band_idx), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(band_idx),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
1697 
/* Enable noise-floor (IPI/IRPI) measurement for one band. */
void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	/* arm the IRPI counters in software-clear mode */
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}
1707 
1708 static u8
1709 mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
1710 {
1711 	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1712 	struct mt7996_dev *dev = phy->dev;
1713 	u32 val, sum = 0, n = 0;
1714 	int ant, i;
1715 
1716 	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
1717 		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);
1718 
1719 		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1720 			val = mt76_rr(dev, reg);
1721 			sum += val * nf_power[i];
1722 			n += val;
1723 		}
1724 	}
1725 
1726 	return n ? sum / n : 0;
1727 }
1728 
/* Refresh per-channel survey data: channel MIB counters plus a smoothed
 * noise-floor estimate.
 */
void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	/* phy->noise is an EWMA kept in 4-bit fixed point (value << 4) */
	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	/* report as a negative dBm value */
	state->noise = -(phy->noise >> 4);
}
1745 
1746 static bool
1747 mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
1748 {
1749 	bool ret;
1750 
1751 	ret = wait_event_timeout(dev->reset_wait,
1752 				 (READ_ONCE(dev->reset_state) & state),
1753 				 MT7996_RESET_TIMEOUT);
1754 
1755 	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1756 	return ret;
1757 }
1758 
1759 static void
1760 mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1761 {
1762 	struct ieee80211_hw *hw = priv;
1763 
1764 	switch (vif->type) {
1765 	case NL80211_IFTYPE_MESH_POINT:
1766 	case NL80211_IFTYPE_ADHOC:
1767 	case NL80211_IFTYPE_AP:
1768 		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
1769 		break;
1770 	default:
1771 		break;
1772 	}
1773 }
1774 
1775 static void
1776 mt7996_update_beacons(struct mt7996_dev *dev)
1777 {
1778 	struct mt76_phy *phy2, *phy3;
1779 
1780 	ieee80211_iterate_active_interfaces(dev->mt76.hw,
1781 					    IEEE80211_IFACE_ITER_RESUME_ALL,
1782 					    mt7996_update_vif_beacon, dev->mt76.hw);
1783 
1784 	phy2 = dev->mt76.phys[MT_BAND1];
1785 	if (!phy2)
1786 		return;
1787 
1788 	ieee80211_iterate_active_interfaces(phy2->hw,
1789 					    IEEE80211_IFACE_ITER_RESUME_ALL,
1790 					    mt7996_update_vif_beacon, phy2->hw);
1791 
1792 	phy3 = dev->mt76.phys[MT_BAND2];
1793 	if (!phy3)
1794 		return;
1795 
1796 	ieee80211_iterate_active_interfaces(phy3->hw,
1797 					    IEEE80211_IFACE_ITER_RESUME_ALL,
1798 					    mt7996_update_vif_beacon, phy3->hw);
1799 }
1800 
static void
mt7996_dma_reset(struct mt7996_dev *dev)
{
	/* Full WFDMA reset: stop the engines, drain every tx/mcu/rx queue,
	 * restore prefetch configuration and restart DMA.  Ordering is
	 * significant — DMA must be stopped before queues are cleaned.
	 */
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	/* second PCIe interface mirrors the same register block */
	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	/* give in-flight DMA transactions time to settle */
	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt76_tx_status_check(&dev->mt76, true);

	/* re-init prefetch settings after reset */
	mt7996_dma_prefetch(dev);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN);
}
1847 
1848 void mt7996_tx_token_put(struct mt7996_dev *dev)
1849 {
1850 	struct mt76_txwi_cache *txwi;
1851 	int id;
1852 
1853 	spin_lock_bh(&dev->mt76.token_lock);
1854 	idr_for_each_entry(&dev->mt76.token, txwi, id) {
1855 		mt7996_txwi_free(dev, txwi, NULL, NULL);
1856 		dev->mt76.token_count--;
1857 	}
1858 	spin_unlock_bh(&dev->mt76.token_lock);
1859 	idr_destroy(&dev->mt76.token);
1860 }
1861 
1862 /* system error recovery */
/* Full-chip recovery worker: handshakes with the MCU through
 * MT_MCU_INT_EVENT, resets DMA and restores queue/NAPI/mac80211 state.
 * Steps mirror each other: everything stopped before the reset is
 * restarted afterwards, in reverse order.
 */
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	/* stop all traffic sources: mac80211 queues, mac work, tx worker,
	 * and NAPI polling on every band
	 */
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* acknowledge the DMA stop and wait for the firmware to finish
	 * its part of the reset before touching the DMA engines
	 */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev);

		/* all in-flight tokens are dead after the DMA reset */
		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->irq_tasklet);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	/* beacon templates were lost across the reset */
	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}
1960 
/* Accumulate the hardware MIB counters of one band into phy->mib.
 * Counters are accumulated (+=) because reading restarts them from zero —
 * presumably clear-on-read hardware; TODO confirm per register.
 */
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	/* rx error/traffic counters */
	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	/* tx counters */
	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	/* rx aggregation counters */
	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	/* beamforming counters */
	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	/* per-PHY-mode beamforming feedback counters all feed the total */
	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	/* A-MSDU size histogram plus running total */
	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}
2101 
/* Deferred worker draining dev->sta_rc_list: pushes pending per-station
 * rate-control changes to the MCU in process context, since the MCU
 * command path may sleep and the entries are queued under a spinlock.
 */
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	/* move all pending entries onto a private list so producers can
	 * keep queueing on sta_rc_list while we process
	 */
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		/* snapshot and clear the change mask while still locked */
		changed = msta->changed;
		msta->changed = 0;
		/* drop the spinlock: the MCU call below may sleep */
		spin_unlock_bh(&dev->sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		/* TODO: smps change */

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
2136 
2137 void mt7996_mac_work(struct work_struct *work)
2138 {
2139 	struct mt7996_phy *phy;
2140 	struct mt76_phy *mphy;
2141 
2142 	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
2143 					       mac_work.work);
2144 	phy = mphy->priv;
2145 
2146 	mutex_lock(&mphy->dev->mutex);
2147 
2148 	mt76_update_survey(mphy);
2149 	if (++mphy->mac_work_count == 5) {
2150 		mphy->mac_work_count = 0;
2151 
2152 		mt7996_mac_update_stats(phy);
2153 	}
2154 
2155 	mutex_unlock(&mphy->dev->mutex);
2156 
2157 	mt76_tx_status_check(mphy->dev, false);
2158 
2159 	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
2160 				     MT7996_WATCHDOG_TIME);
2161 }
2162 
2163 static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
2164 {
2165 	struct mt7996_dev *dev = phy->dev;
2166 
2167 	if (phy->rdd_state & BIT(0))
2168 		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
2169 				   MT_RX_SEL0, 0);
2170 	if (phy->rdd_state & BIT(1))
2171 		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
2172 				   MT_RX_SEL0, 0);
2173 }
2174 
2175 static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
2176 {
2177 	int err, region;
2178 
2179 	switch (dev->mt76.region) {
2180 	case NL80211_DFS_ETSI:
2181 		region = 0;
2182 		break;
2183 	case NL80211_DFS_JP:
2184 		region = 2;
2185 		break;
2186 	case NL80211_DFS_FCC:
2187 	default:
2188 		region = 1;
2189 		break;
2190 	}
2191 
2192 	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
2193 				 MT_RX_SEL0, region);
2194 	if (err < 0)
2195 		return err;
2196 
2197 	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
2198 				 MT_RX_SEL0, 1);
2199 }
2200 
2201 static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
2202 {
2203 	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
2204 	struct mt7996_dev *dev = phy->dev;
2205 	u8 band_idx = phy->mt76->band_idx;
2206 	int err;
2207 
2208 	/* start CAC */
2209 	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
2210 				 MT_RX_SEL0, 0);
2211 	if (err < 0)
2212 		return err;
2213 
2214 	err = mt7996_dfs_start_rdd(dev, band_idx);
2215 	if (err < 0)
2216 		return err;
2217 
2218 	phy->rdd_state |= BIT(band_idx);
2219 
2220 	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
2221 	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
2222 		err = mt7996_dfs_start_rdd(dev, 1);
2223 		if (err < 0)
2224 			return err;
2225 
2226 		phy->rdd_state |= BIT(1);
2227 	}
2228 
2229 	return 0;
2230 }
2231 
2232 static int
2233 mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
2234 {
2235 	const struct mt7996_dfs_radar_spec *radar_specs;
2236 	struct mt7996_dev *dev = phy->dev;
2237 	int err, i;
2238 
2239 	switch (dev->mt76.region) {
2240 	case NL80211_DFS_FCC:
2241 		radar_specs = &fcc_radar_specs;
2242 		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
2243 		if (err < 0)
2244 			return err;
2245 		break;
2246 	case NL80211_DFS_ETSI:
2247 		radar_specs = &etsi_radar_specs;
2248 		break;
2249 	case NL80211_DFS_JP:
2250 		radar_specs = &jp_radar_specs;
2251 		break;
2252 	default:
2253 		return -EINVAL;
2254 	}
2255 
2256 	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
2257 		err = mt7996_mcu_set_radar_th(dev, i,
2258 					      &radar_specs->radar_pattern[i]);
2259 		if (err < 0)
2260 			return err;
2261 	}
2262 
2263 	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
2264 }
2265 
/* DFS state machine: reconcile the phy's cached dfs_state with the state
 * derived from the current channel (disabled / CAC / active), programming
 * or stopping the radar detector as needed.
 *
 * Return: 0 on success or a negative MCU/errno code; on CAC_END failure
 * the state is reset to UNKNOWN so the next call retries from scratch.
 */
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	/* nothing to do if the hardware already matches the target state */
	if (prev_state == dfs_state)
		return 0;

	/* coming from an unknown state: stop any leftover detector chains
	 * before reprogramming
	 */
	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	/* transition from disabled/unknown: load region thresholds and
	 * start CAC plus the detector chains
	 */
	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	/* CAC completed: tell the firmware so the channel becomes usable */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	/* leave DFS mode: return the band to normal rx operation */
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}
2320 
/* Convert a TWT wake duration (expressed in units of 256us) into
 * microseconds.
 */
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration * 256;
}
2326 
/* Insert @flow into the TSF-ordered TWT schedule list, picking the first
 * gap between already-scheduled flows that is large enough for the new
 * flow's service period.
 *
 * Return: the TSF offset assigned to @flow (0 when it becomes the first
 * entry). NOTE(review): when insertion falls through to the tail,
 * the returned start_tsf is the one computed for the current last entry —
 * presumably the intended slot right after it; confirm against callers.
 */
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8; /* service period in 256us units */
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	/* walk pairs (iter, iter_next) looking for a gap of at least
	 * @duration between the end of iter's SP and iter_next's start
	 */
	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
2361 
2362 static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
2363 {
2364 	struct ieee80211_twt_params *twt_agrt;
2365 	u64 interval, duration;
2366 	u16 mantissa;
2367 	u8 exp;
2368 
2369 	/* only individual agreement supported */
2370 	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
2371 		return -EOPNOTSUPP;
2372 
2373 	/* only 256us unit supported */
2374 	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
2375 		return -EOPNOTSUPP;
2376 
2377 	twt_agrt = (struct ieee80211_twt_params *)twt->params;
2378 
2379 	/* explicit agreement not supported */
2380 	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
2381 		return -EOPNOTSUPP;
2382 
2383 	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
2384 			le16_to_cpu(twt_agrt->req_type));
2385 	mantissa = le16_to_cpu(twt_agrt->mantissa);
2386 	duration = twt_agrt->min_twt_dur << 8;
2387 
2388 	interval = (u64)mantissa << exp;
2389 	if (interval < duration)
2390 		return -EOPNOTSUPP;
2391 
2392 	return 0;
2393 }
2394 
/* Handle a TWT setup request from @sta: allocate a flow id and firmware
 * table entry, compute the first target wake time, and install the
 * agreement via the MCU. The response (accept/reject plus the negotiated
 * parameters) is written back into @twt in place.
 */
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	/* reject anything outside the supported TWT feature subset */
	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	/* no room left in the global agreement table */
	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	/* this station already uses all of its per-sta flow slots */
	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	/* pick the lowest free per-sta flow id and echo it in the reply */
	flowid = ffs(~msta->twt.flowid_mask) - 1;
	le16p_replace_bits(&twt_agrt->req_type, flowid,
			   IEEE80211_TWT_REQTYPE_FLOWID);

	/* NOTE(review): assumes a free table bit exists whenever
	 * n_agrt < MT7996_MAX_TWT_AGRT — verify table_mask width matches
	 */
	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	/* populate the flow descriptor from the negotiated parameters */
	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		/* we choose the schedule: slot the flow into the shared
		 * TWT list and align the first TWT to the next interval
		 * boundary after the current TSF
		 */
		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		/* DEMAND/others: keep the requested TWT, track unscheduled */
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	/* firmware accepted: commit the allocations */
	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	/* write the verdict into the reply and keep only the control bits
	 * we support
	 */
	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}
2474 
2475 void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
2476 				  struct mt7996_sta *msta,
2477 				  u8 flowid)
2478 {
2479 	struct mt7996_twt_flow *flow;
2480 
2481 	lockdep_assert_held(&dev->mt76.mutex);
2482 
2483 	if (flowid >= ARRAY_SIZE(msta->twt.flow))
2484 		return;
2485 
2486 	if (!(msta->twt.flowid_mask & BIT(flowid)))
2487 		return;
2488 
2489 	flow = &msta->twt.flow[flowid];
2490 	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
2491 				       MCU_TWT_AGRT_DELETE))
2492 		return;
2493 
2494 	list_del_init(&flow->list);
2495 	msta->twt.flowid_mask &= ~BIT(flowid);
2496 	dev->twt.table_mask &= ~BIT(flow->table_id);
2497 	dev->twt.n_agrt--;
2498 }
2499