xref: /freebsd/sys/contrib/dev/mediatek/mt76/mt7996/mac.c (revision cbb3ec25236ba72f91cbdf23f8b78b9d1af0cedf)
1*cbb3ec25SBjoern A. Zeeb // SPDX-License-Identifier: ISC
2*cbb3ec25SBjoern A. Zeeb /*
3*cbb3ec25SBjoern A. Zeeb  * Copyright (C) 2022 MediaTek Inc.
4*cbb3ec25SBjoern A. Zeeb  */
5*cbb3ec25SBjoern A. Zeeb 
6*cbb3ec25SBjoern A. Zeeb #include <linux/etherdevice.h>
7*cbb3ec25SBjoern A. Zeeb #include <linux/timekeeping.h>
8*cbb3ec25SBjoern A. Zeeb #include "coredump.h"
9*cbb3ec25SBjoern A. Zeeb #include "mt7996.h"
10*cbb3ec25SBjoern A. Zeeb #include "../dma.h"
11*cbb3ec25SBjoern A. Zeeb #include "mac.h"
12*cbb3ec25SBjoern A. Zeeb #include "mcu.h"
13*cbb3ec25SBjoern A. Zeeb #if defined(__FreeBSD__)
14*cbb3ec25SBjoern A. Zeeb #include <linux/delay.h>
15*cbb3ec25SBjoern A. Zeeb #endif
16*cbb3ec25SBjoern A. Zeeb 
/* Extract an 8-bit RCPI field and convert it to dBm: (rcpi - 220) / 2 */
#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)
18*cbb3ec25SBjoern A. Zeeb 
/* ETSI DFS radar detection parameters.
 * Entries in radar_pattern are indexed by the hardware radar pattern type.
 * NOTE(review): numeric values are vendor calibration data; field order is
 * defined by struct mt7996_dfs_radar_spec — do not tune by hand.
 */
static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { },  54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { },  54 },
	},
};
32*cbb3ec25SBjoern A. Zeeb 
/* FCC DFS radar detection parameters (see etsi_radar_specs for layout). */
static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};
43*cbb3ec25SBjoern A. Zeeb 
/* Japan (MKK) DFS radar detection parameters (see etsi_radar_specs for
 * layout); patterns [0]-[4] mirror the FCC table.
 */
static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};
57*cbb3ec25SBjoern A. Zeeb 
58*cbb3ec25SBjoern A. Zeeb static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
59*cbb3ec25SBjoern A. Zeeb 					    u16 idx, bool unicast)
60*cbb3ec25SBjoern A. Zeeb {
61*cbb3ec25SBjoern A. Zeeb 	struct mt7996_sta *sta;
62*cbb3ec25SBjoern A. Zeeb 	struct mt76_wcid *wcid;
63*cbb3ec25SBjoern A. Zeeb 
64*cbb3ec25SBjoern A. Zeeb 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
65*cbb3ec25SBjoern A. Zeeb 		return NULL;
66*cbb3ec25SBjoern A. Zeeb 
67*cbb3ec25SBjoern A. Zeeb 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
68*cbb3ec25SBjoern A. Zeeb 	if (unicast || !wcid)
69*cbb3ec25SBjoern A. Zeeb 		return wcid;
70*cbb3ec25SBjoern A. Zeeb 
71*cbb3ec25SBjoern A. Zeeb 	if (!wcid->sta)
72*cbb3ec25SBjoern A. Zeeb 		return NULL;
73*cbb3ec25SBjoern A. Zeeb 
74*cbb3ec25SBjoern A. Zeeb 	sta = container_of(wcid, struct mt7996_sta, wcid);
75*cbb3ec25SBjoern A. Zeeb 	if (!sta->vif)
76*cbb3ec25SBjoern A. Zeeb 		return NULL;
77*cbb3ec25SBjoern A. Zeeb 
78*cbb3ec25SBjoern A. Zeeb 	return &sta->vif->sta.wcid;
79*cbb3ec25SBjoern A. Zeeb }
80*cbb3ec25SBjoern A. Zeeb 
81*cbb3ec25SBjoern A. Zeeb bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
82*cbb3ec25SBjoern A. Zeeb {
83*cbb3ec25SBjoern A. Zeeb 	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
84*cbb3ec25SBjoern A. Zeeb 		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
85*cbb3ec25SBjoern A. Zeeb 
86*cbb3ec25SBjoern A. Zeeb 	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
87*cbb3ec25SBjoern A. Zeeb 			 0, 5000);
88*cbb3ec25SBjoern A. Zeeb }
89*cbb3ec25SBjoern A. Zeeb 
90*cbb3ec25SBjoern A. Zeeb u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
91*cbb3ec25SBjoern A. Zeeb {
92*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
93*cbb3ec25SBjoern A. Zeeb 		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
94*cbb3ec25SBjoern A. Zeeb 
95*cbb3ec25SBjoern A. Zeeb 	return MT_WTBL_LMAC_OFFS(wcid, dw);
96*cbb3ec25SBjoern A. Zeeb }
97*cbb3ec25SBjoern A. Zeeb 
98*cbb3ec25SBjoern A. Zeeb static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
99*cbb3ec25SBjoern A. Zeeb {
100*cbb3ec25SBjoern A. Zeeb 	static const u8 ac_to_tid[] = {
101*cbb3ec25SBjoern A. Zeeb 		[IEEE80211_AC_BE] = 0,
102*cbb3ec25SBjoern A. Zeeb 		[IEEE80211_AC_BK] = 1,
103*cbb3ec25SBjoern A. Zeeb 		[IEEE80211_AC_VI] = 4,
104*cbb3ec25SBjoern A. Zeeb 		[IEEE80211_AC_VO] = 6
105*cbb3ec25SBjoern A. Zeeb 	};
106*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_sta *sta;
107*cbb3ec25SBjoern A. Zeeb 	struct mt7996_sta *msta;
108*cbb3ec25SBjoern A. Zeeb 	struct rate_info *rate;
109*cbb3ec25SBjoern A. Zeeb 	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
110*cbb3ec25SBjoern A. Zeeb 	LIST_HEAD(sta_poll_list);
111*cbb3ec25SBjoern A. Zeeb 	int i;
112*cbb3ec25SBjoern A. Zeeb 
113*cbb3ec25SBjoern A. Zeeb 	spin_lock_bh(&dev->mt76.sta_poll_lock);
114*cbb3ec25SBjoern A. Zeeb 	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
115*cbb3ec25SBjoern A. Zeeb 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
116*cbb3ec25SBjoern A. Zeeb 
117*cbb3ec25SBjoern A. Zeeb 	rcu_read_lock();
118*cbb3ec25SBjoern A. Zeeb 
119*cbb3ec25SBjoern A. Zeeb 	while (true) {
120*cbb3ec25SBjoern A. Zeeb 		bool clear = false;
121*cbb3ec25SBjoern A. Zeeb 		u32 addr, val;
122*cbb3ec25SBjoern A. Zeeb 		u16 idx;
123*cbb3ec25SBjoern A. Zeeb 		s8 rssi[4];
124*cbb3ec25SBjoern A. Zeeb 		u8 bw;
125*cbb3ec25SBjoern A. Zeeb 
126*cbb3ec25SBjoern A. Zeeb 		spin_lock_bh(&dev->mt76.sta_poll_lock);
127*cbb3ec25SBjoern A. Zeeb 		if (list_empty(&sta_poll_list)) {
128*cbb3ec25SBjoern A. Zeeb 			spin_unlock_bh(&dev->mt76.sta_poll_lock);
129*cbb3ec25SBjoern A. Zeeb 			break;
130*cbb3ec25SBjoern A. Zeeb 		}
131*cbb3ec25SBjoern A. Zeeb 		msta = list_first_entry(&sta_poll_list,
132*cbb3ec25SBjoern A. Zeeb 					struct mt7996_sta, wcid.poll_list);
133*cbb3ec25SBjoern A. Zeeb 		list_del_init(&msta->wcid.poll_list);
134*cbb3ec25SBjoern A. Zeeb 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
135*cbb3ec25SBjoern A. Zeeb 
136*cbb3ec25SBjoern A. Zeeb 		idx = msta->wcid.idx;
137*cbb3ec25SBjoern A. Zeeb 
138*cbb3ec25SBjoern A. Zeeb 		/* refresh peer's airtime reporting */
139*cbb3ec25SBjoern A. Zeeb 		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
140*cbb3ec25SBjoern A. Zeeb 
141*cbb3ec25SBjoern A. Zeeb 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
142*cbb3ec25SBjoern A. Zeeb 			u32 tx_last = msta->airtime_ac[i];
143*cbb3ec25SBjoern A. Zeeb 			u32 rx_last = msta->airtime_ac[i + 4];
144*cbb3ec25SBjoern A. Zeeb 
145*cbb3ec25SBjoern A. Zeeb 			msta->airtime_ac[i] = mt76_rr(dev, addr);
146*cbb3ec25SBjoern A. Zeeb 			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
147*cbb3ec25SBjoern A. Zeeb 
148*cbb3ec25SBjoern A. Zeeb 			tx_time[i] = msta->airtime_ac[i] - tx_last;
149*cbb3ec25SBjoern A. Zeeb 			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
150*cbb3ec25SBjoern A. Zeeb 
151*cbb3ec25SBjoern A. Zeeb 			if ((tx_last | rx_last) & BIT(30))
152*cbb3ec25SBjoern A. Zeeb 				clear = true;
153*cbb3ec25SBjoern A. Zeeb 
154*cbb3ec25SBjoern A. Zeeb 			addr += 8;
155*cbb3ec25SBjoern A. Zeeb 		}
156*cbb3ec25SBjoern A. Zeeb 
157*cbb3ec25SBjoern A. Zeeb 		if (clear) {
158*cbb3ec25SBjoern A. Zeeb 			mt7996_mac_wtbl_update(dev, idx,
159*cbb3ec25SBjoern A. Zeeb 					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
160*cbb3ec25SBjoern A. Zeeb 			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
161*cbb3ec25SBjoern A. Zeeb 		}
162*cbb3ec25SBjoern A. Zeeb 
163*cbb3ec25SBjoern A. Zeeb 		if (!msta->wcid.sta)
164*cbb3ec25SBjoern A. Zeeb 			continue;
165*cbb3ec25SBjoern A. Zeeb 
166*cbb3ec25SBjoern A. Zeeb 		sta = container_of((void *)msta, struct ieee80211_sta,
167*cbb3ec25SBjoern A. Zeeb 				   drv_priv);
168*cbb3ec25SBjoern A. Zeeb 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
169*cbb3ec25SBjoern A. Zeeb 			u8 q = mt76_connac_lmac_mapping(i);
170*cbb3ec25SBjoern A. Zeeb 			u32 tx_cur = tx_time[q];
171*cbb3ec25SBjoern A. Zeeb 			u32 rx_cur = rx_time[q];
172*cbb3ec25SBjoern A. Zeeb 			u8 tid = ac_to_tid[i];
173*cbb3ec25SBjoern A. Zeeb 
174*cbb3ec25SBjoern A. Zeeb 			if (!tx_cur && !rx_cur)
175*cbb3ec25SBjoern A. Zeeb 				continue;
176*cbb3ec25SBjoern A. Zeeb 
177*cbb3ec25SBjoern A. Zeeb 			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
178*cbb3ec25SBjoern A. Zeeb 		}
179*cbb3ec25SBjoern A. Zeeb 
180*cbb3ec25SBjoern A. Zeeb 		/* We don't support reading GI info from txs packets.
181*cbb3ec25SBjoern A. Zeeb 		 * For accurate tx status reporting and AQL improvement,
182*cbb3ec25SBjoern A. Zeeb 		 * we need to make sure that flags match so polling GI
183*cbb3ec25SBjoern A. Zeeb 		 * from per-sta counters directly.
184*cbb3ec25SBjoern A. Zeeb 		 */
185*cbb3ec25SBjoern A. Zeeb 		rate = &msta->wcid.rate;
186*cbb3ec25SBjoern A. Zeeb 
187*cbb3ec25SBjoern A. Zeeb 		switch (rate->bw) {
188*cbb3ec25SBjoern A. Zeeb 		case RATE_INFO_BW_320:
189*cbb3ec25SBjoern A. Zeeb 			bw = IEEE80211_STA_RX_BW_320;
190*cbb3ec25SBjoern A. Zeeb 			break;
191*cbb3ec25SBjoern A. Zeeb 		case RATE_INFO_BW_160:
192*cbb3ec25SBjoern A. Zeeb 			bw = IEEE80211_STA_RX_BW_160;
193*cbb3ec25SBjoern A. Zeeb 			break;
194*cbb3ec25SBjoern A. Zeeb 		case RATE_INFO_BW_80:
195*cbb3ec25SBjoern A. Zeeb 			bw = IEEE80211_STA_RX_BW_80;
196*cbb3ec25SBjoern A. Zeeb 			break;
197*cbb3ec25SBjoern A. Zeeb 		case RATE_INFO_BW_40:
198*cbb3ec25SBjoern A. Zeeb 			bw = IEEE80211_STA_RX_BW_40;
199*cbb3ec25SBjoern A. Zeeb 			break;
200*cbb3ec25SBjoern A. Zeeb 		default:
201*cbb3ec25SBjoern A. Zeeb 			bw = IEEE80211_STA_RX_BW_20;
202*cbb3ec25SBjoern A. Zeeb 			break;
203*cbb3ec25SBjoern A. Zeeb 		}
204*cbb3ec25SBjoern A. Zeeb 
205*cbb3ec25SBjoern A. Zeeb 		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
206*cbb3ec25SBjoern A. Zeeb 		val = mt76_rr(dev, addr);
207*cbb3ec25SBjoern A. Zeeb 		if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
208*cbb3ec25SBjoern A. Zeeb 			addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 5);
209*cbb3ec25SBjoern A. Zeeb 			val = mt76_rr(dev, addr);
210*cbb3ec25SBjoern A. Zeeb 			rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
211*cbb3ec25SBjoern A. Zeeb 		} else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
212*cbb3ec25SBjoern A. Zeeb 			u8 offs = 24 + 2 * bw;
213*cbb3ec25SBjoern A. Zeeb 
214*cbb3ec25SBjoern A. Zeeb 			rate->he_gi = (val & (0x3 << offs)) >> offs;
215*cbb3ec25SBjoern A. Zeeb 		} else if (rate->flags &
216*cbb3ec25SBjoern A. Zeeb 			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
217*cbb3ec25SBjoern A. Zeeb 			if (val & BIT(12 + bw))
218*cbb3ec25SBjoern A. Zeeb 				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
219*cbb3ec25SBjoern A. Zeeb 			else
220*cbb3ec25SBjoern A. Zeeb 				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
221*cbb3ec25SBjoern A. Zeeb 		}
222*cbb3ec25SBjoern A. Zeeb 
223*cbb3ec25SBjoern A. Zeeb 		/* get signal strength of resp frames (CTS/BA/ACK) */
224*cbb3ec25SBjoern A. Zeeb 		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
225*cbb3ec25SBjoern A. Zeeb 		val = mt76_rr(dev, addr);
226*cbb3ec25SBjoern A. Zeeb 
227*cbb3ec25SBjoern A. Zeeb 		rssi[0] = to_rssi(GENMASK(7, 0), val);
228*cbb3ec25SBjoern A. Zeeb 		rssi[1] = to_rssi(GENMASK(15, 8), val);
229*cbb3ec25SBjoern A. Zeeb 		rssi[2] = to_rssi(GENMASK(23, 16), val);
230*cbb3ec25SBjoern A. Zeeb 		rssi[3] = to_rssi(GENMASK(31, 14), val);
231*cbb3ec25SBjoern A. Zeeb 
232*cbb3ec25SBjoern A. Zeeb 		msta->ack_signal =
233*cbb3ec25SBjoern A. Zeeb 			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
234*cbb3ec25SBjoern A. Zeeb 
235*cbb3ec25SBjoern A. Zeeb 		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
236*cbb3ec25SBjoern A. Zeeb 	}
237*cbb3ec25SBjoern A. Zeeb 
238*cbb3ec25SBjoern A. Zeeb 	rcu_read_unlock();
239*cbb3ec25SBjoern A. Zeeb }
240*cbb3ec25SBjoern A. Zeeb 
241*cbb3ec25SBjoern A. Zeeb void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
242*cbb3ec25SBjoern A. Zeeb 			      struct ieee80211_vif *vif, bool enable)
243*cbb3ec25SBjoern A. Zeeb {
244*cbb3ec25SBjoern A. Zeeb 	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
245*cbb3ec25SBjoern A. Zeeb 	u32 addr;
246*cbb3ec25SBjoern A. Zeeb 
247*cbb3ec25SBjoern A. Zeeb 	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
248*cbb3ec25SBjoern A. Zeeb 	if (enable)
249*cbb3ec25SBjoern A. Zeeb 		mt76_set(dev, addr, BIT(5));
250*cbb3ec25SBjoern A. Zeeb 	else
251*cbb3ec25SBjoern A. Zeeb 		mt76_clear(dev, addr, BIT(5));
252*cbb3ec25SBjoern A. Zeeb }
253*cbb3ec25SBjoern A. Zeeb 
254*cbb3ec25SBjoern A. Zeeb void mt7996_mac_set_fixed_rate_table(struct mt7996_dev *dev,
255*cbb3ec25SBjoern A. Zeeb 				     u8 tbl_idx, u16 rate_idx)
256*cbb3ec25SBjoern A. Zeeb {
257*cbb3ec25SBjoern A. Zeeb 	u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx;
258*cbb3ec25SBjoern A. Zeeb 
259*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
260*cbb3ec25SBjoern A. Zeeb 	/* use wtbl spe idx */
261*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
262*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_WTBL_ITCR, ctrl);
263*cbb3ec25SBjoern A. Zeeb }
264*cbb3ec25SBjoern A. Zeeb 
/* The HW does not translate the mac header to 802.3 for mesh point */
/* Rebuild the original 802.11 header in front of a frame the hardware
 * already translated to 802.3: frame control/seq control come from the
 * RXD group-4 words, the addresses from the vif/sta and the 802.3
 * header.  Returns 0 on success, -EINVAL when the frame cannot be
 * reversed (not unicast, no group-4 RXD data, or no station context).
 */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	/* only unicast-to-me frames can be reversed */
	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	/* group-4 RXD words carry the original frame/seq control */
	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	/* addr3/addr4 depend on the ToDS/FromDS combination */
	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	/* strip the 802.3 header and re-add an LLC/SNAP header where the
	 * original frame would have carried one
	 */
	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	/* push optional HT control and QoS control fields */
	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	/* 3-address headers omit addr4 (last 6 bytes of struct) */
	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
344*cbb3ec25SBjoern A. Zeeb 
/* Decode the PRXV rate words into mt76_rx_status rate fields.
 *
 * @rxv points at the PRXV words of the RX descriptor; on success the
 * rate index, encoding, NSS, GI and bandwidth of @status are filled in
 * and the PHY tx mode is returned through @mode.
 * Returns 0 on success, -EINVAL for out-of-range rate indices or an
 * unknown PHY mode / bandwidth.
 */
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	/* keep the raw index in idx; i may be remapped per PHY mode */
	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		/* legacy rates: map hw index to the sband rate table */
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		/* HE MCS index is the low 4 bits of the rate field */
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		/* HE extended-range SU on a 106-tone RU is reported as
		 * 40 MHz by hw but must be flagged as an RU allocation
		 */
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	case IEEE80211_STA_RX_BW_320:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	/* pre-HE modes signal short GI via the generic flag */
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}
452*cbb3ec25SBjoern A. Zeeb 
453*cbb3ec25SBjoern A. Zeeb static int
454*cbb3ec25SBjoern A. Zeeb mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
455*cbb3ec25SBjoern A. Zeeb {
456*cbb3ec25SBjoern A. Zeeb 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
457*cbb3ec25SBjoern A. Zeeb 	struct mt76_phy *mphy = &dev->mt76.phy;
458*cbb3ec25SBjoern A. Zeeb 	struct mt7996_phy *phy = &dev->phy;
459*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_supported_band *sband;
460*cbb3ec25SBjoern A. Zeeb 	__le32 *rxd = (__le32 *)skb->data;
461*cbb3ec25SBjoern A. Zeeb 	__le32 *rxv = NULL;
462*cbb3ec25SBjoern A. Zeeb 	u32 rxd0 = le32_to_cpu(rxd[0]);
463*cbb3ec25SBjoern A. Zeeb 	u32 rxd1 = le32_to_cpu(rxd[1]);
464*cbb3ec25SBjoern A. Zeeb 	u32 rxd2 = le32_to_cpu(rxd[2]);
465*cbb3ec25SBjoern A. Zeeb 	u32 rxd3 = le32_to_cpu(rxd[3]);
466*cbb3ec25SBjoern A. Zeeb 	u32 rxd4 = le32_to_cpu(rxd[4]);
467*cbb3ec25SBjoern A. Zeeb 	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
468*cbb3ec25SBjoern A. Zeeb 	u32 csum_status = *(u32 *)skb->cb;
469*cbb3ec25SBjoern A. Zeeb 	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
470*cbb3ec25SBjoern A. Zeeb 	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
471*cbb3ec25SBjoern A. Zeeb 	bool unicast, insert_ccmp_hdr = false;
472*cbb3ec25SBjoern A. Zeeb 	u8 remove_pad, amsdu_info, band_idx;
473*cbb3ec25SBjoern A. Zeeb 	u8 mode = 0, qos_ctl = 0;
474*cbb3ec25SBjoern A. Zeeb 	bool hdr_trans;
475*cbb3ec25SBjoern A. Zeeb 	u16 hdr_gap;
476*cbb3ec25SBjoern A. Zeeb 	u16 seq_ctrl = 0;
477*cbb3ec25SBjoern A. Zeeb 	__le16 fc = 0;
478*cbb3ec25SBjoern A. Zeeb 	int idx;
479*cbb3ec25SBjoern A. Zeeb 
480*cbb3ec25SBjoern A. Zeeb 	memset(status, 0, sizeof(*status));
481*cbb3ec25SBjoern A. Zeeb 
482*cbb3ec25SBjoern A. Zeeb 	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
483*cbb3ec25SBjoern A. Zeeb 	mphy = dev->mt76.phys[band_idx];
484*cbb3ec25SBjoern A. Zeeb 	phy = mphy->priv;
485*cbb3ec25SBjoern A. Zeeb 	status->phy_idx = mphy->band_idx;
486*cbb3ec25SBjoern A. Zeeb 
487*cbb3ec25SBjoern A. Zeeb 	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
488*cbb3ec25SBjoern A. Zeeb 		return -EINVAL;
489*cbb3ec25SBjoern A. Zeeb 
490*cbb3ec25SBjoern A. Zeeb 	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
491*cbb3ec25SBjoern A. Zeeb 		return -EINVAL;
492*cbb3ec25SBjoern A. Zeeb 
493*cbb3ec25SBjoern A. Zeeb 	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
494*cbb3ec25SBjoern A. Zeeb 	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
495*cbb3ec25SBjoern A. Zeeb 		return -EINVAL;
496*cbb3ec25SBjoern A. Zeeb 
497*cbb3ec25SBjoern A. Zeeb 	/* ICV error or CCMP/BIP/WPI MIC error */
498*cbb3ec25SBjoern A. Zeeb 	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
499*cbb3ec25SBjoern A. Zeeb 		status->flag |= RX_FLAG_ONLY_MONITOR;
500*cbb3ec25SBjoern A. Zeeb 
501*cbb3ec25SBjoern A. Zeeb 	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
502*cbb3ec25SBjoern A. Zeeb 	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
503*cbb3ec25SBjoern A. Zeeb 	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
504*cbb3ec25SBjoern A. Zeeb 
505*cbb3ec25SBjoern A. Zeeb 	if (status->wcid) {
506*cbb3ec25SBjoern A. Zeeb 		struct mt7996_sta *msta;
507*cbb3ec25SBjoern A. Zeeb 
508*cbb3ec25SBjoern A. Zeeb 		msta = container_of(status->wcid, struct mt7996_sta, wcid);
509*cbb3ec25SBjoern A. Zeeb 		spin_lock_bh(&dev->mt76.sta_poll_lock);
510*cbb3ec25SBjoern A. Zeeb 		if (list_empty(&msta->wcid.poll_list))
511*cbb3ec25SBjoern A. Zeeb 			list_add_tail(&msta->wcid.poll_list,
512*cbb3ec25SBjoern A. Zeeb 				      &dev->mt76.sta_poll_list);
513*cbb3ec25SBjoern A. Zeeb 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
514*cbb3ec25SBjoern A. Zeeb 	}
515*cbb3ec25SBjoern A. Zeeb 
516*cbb3ec25SBjoern A. Zeeb 	status->freq = mphy->chandef.chan->center_freq;
517*cbb3ec25SBjoern A. Zeeb 	status->band = mphy->chandef.chan->band;
518*cbb3ec25SBjoern A. Zeeb 	if (status->band == NL80211_BAND_5GHZ)
519*cbb3ec25SBjoern A. Zeeb 		sband = &mphy->sband_5g.sband;
520*cbb3ec25SBjoern A. Zeeb 	else if (status->band == NL80211_BAND_6GHZ)
521*cbb3ec25SBjoern A. Zeeb 		sband = &mphy->sband_6g.sband;
522*cbb3ec25SBjoern A. Zeeb 	else
523*cbb3ec25SBjoern A. Zeeb 		sband = &mphy->sband_2g.sband;
524*cbb3ec25SBjoern A. Zeeb 
525*cbb3ec25SBjoern A. Zeeb 	if (!sband->channels)
526*cbb3ec25SBjoern A. Zeeb 		return -EINVAL;
527*cbb3ec25SBjoern A. Zeeb 
528*cbb3ec25SBjoern A. Zeeb 	if ((rxd0 & csum_mask) == csum_mask &&
529*cbb3ec25SBjoern A. Zeeb 	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
530*cbb3ec25SBjoern A. Zeeb 		skb->ip_summed = CHECKSUM_UNNECESSARY;
531*cbb3ec25SBjoern A. Zeeb 
532*cbb3ec25SBjoern A. Zeeb 	if (rxd1 & MT_RXD3_NORMAL_FCS_ERR)
533*cbb3ec25SBjoern A. Zeeb 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
534*cbb3ec25SBjoern A. Zeeb 
535*cbb3ec25SBjoern A. Zeeb 	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
536*cbb3ec25SBjoern A. Zeeb 		status->flag |= RX_FLAG_MMIC_ERROR;
537*cbb3ec25SBjoern A. Zeeb 
538*cbb3ec25SBjoern A. Zeeb 	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
539*cbb3ec25SBjoern A. Zeeb 	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
540*cbb3ec25SBjoern A. Zeeb 		status->flag |= RX_FLAG_DECRYPTED;
541*cbb3ec25SBjoern A. Zeeb 		status->flag |= RX_FLAG_IV_STRIPPED;
542*cbb3ec25SBjoern A. Zeeb 		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
543*cbb3ec25SBjoern A. Zeeb 	}
544*cbb3ec25SBjoern A. Zeeb 
545*cbb3ec25SBjoern A. Zeeb 	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
546*cbb3ec25SBjoern A. Zeeb 
547*cbb3ec25SBjoern A. Zeeb 	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
548*cbb3ec25SBjoern A. Zeeb 		return -EINVAL;
549*cbb3ec25SBjoern A. Zeeb 
550*cbb3ec25SBjoern A. Zeeb 	rxd += 8;
551*cbb3ec25SBjoern A. Zeeb 	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
552*cbb3ec25SBjoern A. Zeeb 		u32 v0 = le32_to_cpu(rxd[0]);
553*cbb3ec25SBjoern A. Zeeb 		u32 v2 = le32_to_cpu(rxd[2]);
554*cbb3ec25SBjoern A. Zeeb 
555*cbb3ec25SBjoern A. Zeeb 		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
556*cbb3ec25SBjoern A. Zeeb 		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
557*cbb3ec25SBjoern A. Zeeb 		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);
558*cbb3ec25SBjoern A. Zeeb 
559*cbb3ec25SBjoern A. Zeeb 		rxd += 4;
560*cbb3ec25SBjoern A. Zeeb 		if ((u8 *)rxd - skb->data >= skb->len)
561*cbb3ec25SBjoern A. Zeeb 			return -EINVAL;
562*cbb3ec25SBjoern A. Zeeb 	}
563*cbb3ec25SBjoern A. Zeeb 
564*cbb3ec25SBjoern A. Zeeb 	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
565*cbb3ec25SBjoern A. Zeeb 		u8 *data = (u8 *)rxd;
566*cbb3ec25SBjoern A. Zeeb 
567*cbb3ec25SBjoern A. Zeeb 		if (status->flag & RX_FLAG_DECRYPTED) {
568*cbb3ec25SBjoern A. Zeeb 			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
569*cbb3ec25SBjoern A. Zeeb 			case MT_CIPHER_AES_CCMP:
570*cbb3ec25SBjoern A. Zeeb 			case MT_CIPHER_CCMP_CCX:
571*cbb3ec25SBjoern A. Zeeb 			case MT_CIPHER_CCMP_256:
572*cbb3ec25SBjoern A. Zeeb 				insert_ccmp_hdr =
573*cbb3ec25SBjoern A. Zeeb 					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
574*cbb3ec25SBjoern A. Zeeb 				fallthrough;
575*cbb3ec25SBjoern A. Zeeb 			case MT_CIPHER_TKIP:
576*cbb3ec25SBjoern A. Zeeb 			case MT_CIPHER_TKIP_NO_MIC:
577*cbb3ec25SBjoern A. Zeeb 			case MT_CIPHER_GCMP:
578*cbb3ec25SBjoern A. Zeeb 			case MT_CIPHER_GCMP_256:
579*cbb3ec25SBjoern A. Zeeb 				status->iv[0] = data[5];
580*cbb3ec25SBjoern A. Zeeb 				status->iv[1] = data[4];
581*cbb3ec25SBjoern A. Zeeb 				status->iv[2] = data[3];
582*cbb3ec25SBjoern A. Zeeb 				status->iv[3] = data[2];
583*cbb3ec25SBjoern A. Zeeb 				status->iv[4] = data[1];
584*cbb3ec25SBjoern A. Zeeb 				status->iv[5] = data[0];
585*cbb3ec25SBjoern A. Zeeb 				break;
586*cbb3ec25SBjoern A. Zeeb 			default:
587*cbb3ec25SBjoern A. Zeeb 				break;
588*cbb3ec25SBjoern A. Zeeb 			}
589*cbb3ec25SBjoern A. Zeeb 		}
590*cbb3ec25SBjoern A. Zeeb 		rxd += 4;
591*cbb3ec25SBjoern A. Zeeb 		if ((u8 *)rxd - skb->data >= skb->len)
592*cbb3ec25SBjoern A. Zeeb 			return -EINVAL;
593*cbb3ec25SBjoern A. Zeeb 	}
594*cbb3ec25SBjoern A. Zeeb 
595*cbb3ec25SBjoern A. Zeeb 	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
596*cbb3ec25SBjoern A. Zeeb 		status->timestamp = le32_to_cpu(rxd[0]);
597*cbb3ec25SBjoern A. Zeeb 		status->flag |= RX_FLAG_MACTIME_START;
598*cbb3ec25SBjoern A. Zeeb 
599*cbb3ec25SBjoern A. Zeeb 		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
600*cbb3ec25SBjoern A. Zeeb 			status->flag |= RX_FLAG_AMPDU_DETAILS;
601*cbb3ec25SBjoern A. Zeeb 
602*cbb3ec25SBjoern A. Zeeb 			/* all subframes of an A-MPDU have the same timestamp */
603*cbb3ec25SBjoern A. Zeeb 			if (phy->rx_ampdu_ts != status->timestamp) {
604*cbb3ec25SBjoern A. Zeeb 				if (!++phy->ampdu_ref)
605*cbb3ec25SBjoern A. Zeeb 					phy->ampdu_ref++;
606*cbb3ec25SBjoern A. Zeeb 			}
607*cbb3ec25SBjoern A. Zeeb 			phy->rx_ampdu_ts = status->timestamp;
608*cbb3ec25SBjoern A. Zeeb 
609*cbb3ec25SBjoern A. Zeeb 			status->ampdu_ref = phy->ampdu_ref;
610*cbb3ec25SBjoern A. Zeeb 		}
611*cbb3ec25SBjoern A. Zeeb 
612*cbb3ec25SBjoern A. Zeeb 		rxd += 4;
613*cbb3ec25SBjoern A. Zeeb 		if ((u8 *)rxd - skb->data >= skb->len)
614*cbb3ec25SBjoern A. Zeeb 			return -EINVAL;
615*cbb3ec25SBjoern A. Zeeb 	}
616*cbb3ec25SBjoern A. Zeeb 
617*cbb3ec25SBjoern A. Zeeb 	/* RXD Group 3 - P-RXV */
618*cbb3ec25SBjoern A. Zeeb 	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
619*cbb3ec25SBjoern A. Zeeb 		u32 v3;
620*cbb3ec25SBjoern A. Zeeb 		int ret;
621*cbb3ec25SBjoern A. Zeeb 
622*cbb3ec25SBjoern A. Zeeb 		rxv = rxd;
623*cbb3ec25SBjoern A. Zeeb 		rxd += 4;
624*cbb3ec25SBjoern A. Zeeb 		if ((u8 *)rxd - skb->data >= skb->len)
625*cbb3ec25SBjoern A. Zeeb 			return -EINVAL;
626*cbb3ec25SBjoern A. Zeeb 
627*cbb3ec25SBjoern A. Zeeb 		v3 = le32_to_cpu(rxv[3]);
628*cbb3ec25SBjoern A. Zeeb 
629*cbb3ec25SBjoern A. Zeeb 		status->chains = mphy->antenna_mask;
630*cbb3ec25SBjoern A. Zeeb 		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
631*cbb3ec25SBjoern A. Zeeb 		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
632*cbb3ec25SBjoern A. Zeeb 		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
633*cbb3ec25SBjoern A. Zeeb 		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);
634*cbb3ec25SBjoern A. Zeeb 
635*cbb3ec25SBjoern A. Zeeb 		/* RXD Group 5 - C-RXV */
636*cbb3ec25SBjoern A. Zeeb 		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
637*cbb3ec25SBjoern A. Zeeb 			rxd += 24;
638*cbb3ec25SBjoern A. Zeeb 			if ((u8 *)rxd - skb->data >= skb->len)
639*cbb3ec25SBjoern A. Zeeb 				return -EINVAL;
640*cbb3ec25SBjoern A. Zeeb 		}
641*cbb3ec25SBjoern A. Zeeb 
642*cbb3ec25SBjoern A. Zeeb 		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
643*cbb3ec25SBjoern A. Zeeb 		if (ret < 0)
644*cbb3ec25SBjoern A. Zeeb 			return ret;
645*cbb3ec25SBjoern A. Zeeb 	}
646*cbb3ec25SBjoern A. Zeeb 
647*cbb3ec25SBjoern A. Zeeb 	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
648*cbb3ec25SBjoern A. Zeeb 	status->amsdu = !!amsdu_info;
649*cbb3ec25SBjoern A. Zeeb 	if (status->amsdu) {
650*cbb3ec25SBjoern A. Zeeb 		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
651*cbb3ec25SBjoern A. Zeeb 		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
652*cbb3ec25SBjoern A. Zeeb 	}
653*cbb3ec25SBjoern A. Zeeb 
654*cbb3ec25SBjoern A. Zeeb 	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
655*cbb3ec25SBjoern A. Zeeb 	if (hdr_trans && ieee80211_has_morefrags(fc)) {
656*cbb3ec25SBjoern A. Zeeb 		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
657*cbb3ec25SBjoern A. Zeeb 			return -EINVAL;
658*cbb3ec25SBjoern A. Zeeb 		hdr_trans = false;
659*cbb3ec25SBjoern A. Zeeb 	} else {
660*cbb3ec25SBjoern A. Zeeb 		int pad_start = 0;
661*cbb3ec25SBjoern A. Zeeb 
662*cbb3ec25SBjoern A. Zeeb 		skb_pull(skb, hdr_gap);
663*cbb3ec25SBjoern A. Zeeb 		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
664*cbb3ec25SBjoern A. Zeeb 			pad_start = ieee80211_get_hdrlen_from_skb(skb);
665*cbb3ec25SBjoern A. Zeeb 		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
666*cbb3ec25SBjoern A. Zeeb 			/* When header translation failure is indicated,
667*cbb3ec25SBjoern A. Zeeb 			 * the hardware will insert an extra 2-byte field
668*cbb3ec25SBjoern A. Zeeb 			 * containing the data length after the protocol
669*cbb3ec25SBjoern A. Zeeb 			 * type field. This happens either when the LLC-SNAP
670*cbb3ec25SBjoern A. Zeeb 			 * pattern did not match, or if a VLAN header was
671*cbb3ec25SBjoern A. Zeeb 			 * detected.
672*cbb3ec25SBjoern A. Zeeb 			 */
673*cbb3ec25SBjoern A. Zeeb 			pad_start = 12;
674*cbb3ec25SBjoern A. Zeeb 			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
675*cbb3ec25SBjoern A. Zeeb 				pad_start += 4;
676*cbb3ec25SBjoern A. Zeeb 			else
677*cbb3ec25SBjoern A. Zeeb 				pad_start = 0;
678*cbb3ec25SBjoern A. Zeeb 		}
679*cbb3ec25SBjoern A. Zeeb 
680*cbb3ec25SBjoern A. Zeeb 		if (pad_start) {
681*cbb3ec25SBjoern A. Zeeb 			memmove(skb->data + 2, skb->data, pad_start);
682*cbb3ec25SBjoern A. Zeeb 			skb_pull(skb, 2);
683*cbb3ec25SBjoern A. Zeeb 		}
684*cbb3ec25SBjoern A. Zeeb 	}
685*cbb3ec25SBjoern A. Zeeb 
686*cbb3ec25SBjoern A. Zeeb 	if (!hdr_trans) {
687*cbb3ec25SBjoern A. Zeeb 		struct ieee80211_hdr *hdr;
688*cbb3ec25SBjoern A. Zeeb 
689*cbb3ec25SBjoern A. Zeeb 		if (insert_ccmp_hdr) {
690*cbb3ec25SBjoern A. Zeeb 			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
691*cbb3ec25SBjoern A. Zeeb 
692*cbb3ec25SBjoern A. Zeeb 			mt76_insert_ccmp_hdr(skb, key_id);
693*cbb3ec25SBjoern A. Zeeb 		}
694*cbb3ec25SBjoern A. Zeeb 
695*cbb3ec25SBjoern A. Zeeb 		hdr = mt76_skb_get_hdr(skb);
696*cbb3ec25SBjoern A. Zeeb 		fc = hdr->frame_control;
697*cbb3ec25SBjoern A. Zeeb 		if (ieee80211_is_data_qos(fc)) {
698*cbb3ec25SBjoern A. Zeeb 			u8 *qos = ieee80211_get_qos_ctl(hdr);
699*cbb3ec25SBjoern A. Zeeb 
700*cbb3ec25SBjoern A. Zeeb 			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
701*cbb3ec25SBjoern A. Zeeb 			qos_ctl = *qos;
702*cbb3ec25SBjoern A. Zeeb 
703*cbb3ec25SBjoern A. Zeeb 			/* Mesh DA/SA/Length will be stripped after hardware
704*cbb3ec25SBjoern A. Zeeb 			 * de-amsdu, so here needs to clear amsdu present bit
705*cbb3ec25SBjoern A. Zeeb 			 * to mark it as a normal mesh frame.
706*cbb3ec25SBjoern A. Zeeb 			 */
707*cbb3ec25SBjoern A. Zeeb 			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
708*cbb3ec25SBjoern A. Zeeb 				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
709*cbb3ec25SBjoern A. Zeeb 		}
710*cbb3ec25SBjoern A. Zeeb 	} else {
711*cbb3ec25SBjoern A. Zeeb 		status->flag |= RX_FLAG_8023;
712*cbb3ec25SBjoern A. Zeeb 	}
713*cbb3ec25SBjoern A. Zeeb 
714*cbb3ec25SBjoern A. Zeeb 	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
715*cbb3ec25SBjoern A. Zeeb 		mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
716*cbb3ec25SBjoern A. Zeeb 
717*cbb3ec25SBjoern A. Zeeb 	if (!status->wcid || !ieee80211_is_data_qos(fc))
718*cbb3ec25SBjoern A. Zeeb 		return 0;
719*cbb3ec25SBjoern A. Zeeb 
720*cbb3ec25SBjoern A. Zeeb 	status->aggr = unicast &&
721*cbb3ec25SBjoern A. Zeeb 		       !ieee80211_is_qos_nullfunc(fc);
722*cbb3ec25SBjoern A. Zeeb 	status->qos_ctl = qos_ctl;
723*cbb3ec25SBjoern A. Zeeb 	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
724*cbb3ec25SBjoern A. Zeeb 
725*cbb3ec25SBjoern A. Zeeb 	return 0;
726*cbb3ec25SBjoern A. Zeeb }
727*cbb3ec25SBjoern A. Zeeb 
728*cbb3ec25SBjoern A. Zeeb static void
729*cbb3ec25SBjoern A. Zeeb mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
730*cbb3ec25SBjoern A. Zeeb 			   struct sk_buff *skb, struct mt76_wcid *wcid)
731*cbb3ec25SBjoern A. Zeeb {
732*cbb3ec25SBjoern A. Zeeb 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
733*cbb3ec25SBjoern A. Zeeb 	u8 fc_type, fc_stype;
734*cbb3ec25SBjoern A. Zeeb 	u16 ethertype;
735*cbb3ec25SBjoern A. Zeeb 	bool wmm = false;
736*cbb3ec25SBjoern A. Zeeb 	u32 val;
737*cbb3ec25SBjoern A. Zeeb 
738*cbb3ec25SBjoern A. Zeeb 	if (wcid->sta) {
739*cbb3ec25SBjoern A. Zeeb 		struct ieee80211_sta *sta;
740*cbb3ec25SBjoern A. Zeeb 
741*cbb3ec25SBjoern A. Zeeb 		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
742*cbb3ec25SBjoern A. Zeeb 		wmm = sta->wme;
743*cbb3ec25SBjoern A. Zeeb 	}
744*cbb3ec25SBjoern A. Zeeb 
745*cbb3ec25SBjoern A. Zeeb 	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
746*cbb3ec25SBjoern A. Zeeb 	      FIELD_PREP(MT_TXD1_TID, tid);
747*cbb3ec25SBjoern A. Zeeb 
748*cbb3ec25SBjoern A. Zeeb 	ethertype = get_unaligned_be16(&skb->data[12]);
749*cbb3ec25SBjoern A. Zeeb 	if (ethertype >= ETH_P_802_3_MIN)
750*cbb3ec25SBjoern A. Zeeb 		val |= MT_TXD1_ETH_802_3;
751*cbb3ec25SBjoern A. Zeeb 
752*cbb3ec25SBjoern A. Zeeb 	txwi[1] |= cpu_to_le32(val);
753*cbb3ec25SBjoern A. Zeeb 
754*cbb3ec25SBjoern A. Zeeb 	fc_type = IEEE80211_FTYPE_DATA >> 2;
755*cbb3ec25SBjoern A. Zeeb 	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
756*cbb3ec25SBjoern A. Zeeb 
757*cbb3ec25SBjoern A. Zeeb 	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
758*cbb3ec25SBjoern A. Zeeb 	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
759*cbb3ec25SBjoern A. Zeeb 
760*cbb3ec25SBjoern A. Zeeb 	txwi[2] |= cpu_to_le32(val);
761*cbb3ec25SBjoern A. Zeeb }
762*cbb3ec25SBjoern A. Zeeb 
/* Fill the 802.11 (native wifi frame) specific fields of the tx
 * descriptor: TID selection, header length, fixed-rate policy,
 * management-frame protection (BIP), frame type/subtype, and the
 * sequence-number override for injected frames.
 */
static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* ADDBA requests get a dedicated internal TID so the hardware
	 * treats them specially; other management frames go out on the
	 * "normal" non-QoS TID.
	 */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	/* MT_TXD1_HDR_INFO carries the 802.11 header length in 16-bit words */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	/* management/multicast/minrate frames use a fixed rate entry
	 * (picked in mt7996_mac_write_txwi()) instead of rate adaptation
	 */
	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	/* protected multicast management frames with AES-CMAC use BIP;
	 * clear the per-frame protection bit set earlier for keyed frames
	 */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		/* beacons: no software power-save buffering, max retry count */
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	/* injected (monitor) frames keep their caller-supplied sequence
	 * number; for BARs it comes from the start-sequence field instead
	 */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		/* hardware A-MSDU aggregation would rewrite the SN */
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}
830*cbb3ec25SBjoern A. Zeeb 
/* Build the common part of the hardware tx descriptor (TXD words 0-7)
 * for @skb, then delegate the encapsulation-specific fields to the
 * 802.3 or 802.11 helper. @pid is the packet id used for tx-status
 * matching; @changed carries BSS_CHANGED_* bits so beacons and in-band
 * discovery frames can be routed to their dedicated hardware queues.
 */
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif *mvif;
	u16 tx_count = 15;
	u32 val;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED));
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));

	/* vif-specific indices override the defaults derived from hw_queue */
	mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	/* pick packet format and LMAC queue:
	 * - FILS discovery / unsolicited probe resp and beacons are
	 *   firmware-generated-format frames on dedicated queues
	 * - PSD and higher queues map to the ALTX (high priority) queue
	 * - everything else goes to the per-vif WMM AC queue set
	 */
	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	/* TXD0: total byte count (payload + descriptor), format, queue */
	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	/* TXD1: station index, owner MAC index, band */
	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	/* TXD3: retry limit, encryption, ack policy, hw A-MSDU */
	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	/* TXD5: packet id; ids >= MT_PACKET_ID_FIRST request a host
	 * tx-status report (consumed by mt7996_mac_add_txs_skb)
	 */
	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
	      FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	/* encapsulation-specific fields (TID, frame type, hdr info, ...) */
	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	/* fixed-rate frames: select the rate table entry — per-vif
	 * multicast/beacon/basic rates when available, global basic
	 * rates otherwise — and disable block ack
	 */
	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
931*cbb3ec25SBjoern A. Zeeb 
/* mt76 .tx_prepare_skb hook: build the tx descriptor (TXWI) and the
 * cut-through TXP fragment list for @tx_info->skb, allocate a DMA
 * token for completion matching, and shrink the DMA buffer list so
 * only the descriptor plus a partial header are mapped for firmware.
 * Returns 0 on success or a negative errno.
 */
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	/* request a hardware tx-status report at most once every HZ/4
	 * jiffies per station
	 */
	if (sta) {
		struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	/* the txwi cache entry lives right behind the descriptor memory */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	/* token id is echoed back by firmware in the tx-free event */
	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, qid, 0);

	/* TXP follows the TXD; record every payload fragment (buf[0] is
	 * the descriptor itself, hence the i + 1 offset)
	 */
	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags =
		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->fw.bss_idx = mvif->mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
	/* skb ownership moved to the token; mark the slot so the DMA
	 * layer does not free it on completion
	 */
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
1010*cbb3ec25SBjoern A. Zeeb 
1011*cbb3ec25SBjoern A. Zeeb static void
1012*cbb3ec25SBjoern A. Zeeb mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
1013*cbb3ec25SBjoern A. Zeeb {
1014*cbb3ec25SBjoern A. Zeeb 	struct mt7996_sta *msta;
1015*cbb3ec25SBjoern A. Zeeb 	u16 fc, tid;
1016*cbb3ec25SBjoern A. Zeeb 	u32 val;
1017*cbb3ec25SBjoern A. Zeeb 
1018*cbb3ec25SBjoern A. Zeeb 	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
1019*cbb3ec25SBjoern A. Zeeb 		return;
1020*cbb3ec25SBjoern A. Zeeb 
1021*cbb3ec25SBjoern A. Zeeb 	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
1022*cbb3ec25SBjoern A. Zeeb 	if (tid >= 6) /* skip VO queue */
1023*cbb3ec25SBjoern A. Zeeb 		return;
1024*cbb3ec25SBjoern A. Zeeb 
1025*cbb3ec25SBjoern A. Zeeb 	val = le32_to_cpu(txwi[2]);
1026*cbb3ec25SBjoern A. Zeeb 	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
1027*cbb3ec25SBjoern A. Zeeb 	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
1028*cbb3ec25SBjoern A. Zeeb 	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
1029*cbb3ec25SBjoern A. Zeeb 		return;
1030*cbb3ec25SBjoern A. Zeeb 
1031*cbb3ec25SBjoern A. Zeeb 	msta = (struct mt7996_sta *)sta->drv_priv;
1032*cbb3ec25SBjoern A. Zeeb 	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
1033*cbb3ec25SBjoern A. Zeeb 		ieee80211_start_tx_ba_session(sta, tid, 0);
1034*cbb3ec25SBjoern A. Zeeb }
1035*cbb3ec25SBjoern A. Zeeb 
1036*cbb3ec25SBjoern A. Zeeb static void
1037*cbb3ec25SBjoern A. Zeeb mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
1038*cbb3ec25SBjoern A. Zeeb 		 struct ieee80211_sta *sta, struct list_head *free_list)
1039*cbb3ec25SBjoern A. Zeeb {
1040*cbb3ec25SBjoern A. Zeeb 	struct mt76_dev *mdev = &dev->mt76;
1041*cbb3ec25SBjoern A. Zeeb 	struct mt76_wcid *wcid;
1042*cbb3ec25SBjoern A. Zeeb 	__le32 *txwi;
1043*cbb3ec25SBjoern A. Zeeb 	u16 wcid_idx;
1044*cbb3ec25SBjoern A. Zeeb 
1045*cbb3ec25SBjoern A. Zeeb 	mt76_connac_txp_skb_unmap(mdev, t);
1046*cbb3ec25SBjoern A. Zeeb 	if (!t->skb)
1047*cbb3ec25SBjoern A. Zeeb 		goto out;
1048*cbb3ec25SBjoern A. Zeeb 
1049*cbb3ec25SBjoern A. Zeeb 	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
1050*cbb3ec25SBjoern A. Zeeb 	if (sta) {
1051*cbb3ec25SBjoern A. Zeeb 		wcid = (struct mt76_wcid *)sta->drv_priv;
1052*cbb3ec25SBjoern A. Zeeb 		wcid_idx = wcid->idx;
1053*cbb3ec25SBjoern A. Zeeb 
1054*cbb3ec25SBjoern A. Zeeb 		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
1055*cbb3ec25SBjoern A. Zeeb 			mt7996_tx_check_aggr(sta, txwi);
1056*cbb3ec25SBjoern A. Zeeb 	} else {
1057*cbb3ec25SBjoern A. Zeeb 		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
1058*cbb3ec25SBjoern A. Zeeb 	}
1059*cbb3ec25SBjoern A. Zeeb 
1060*cbb3ec25SBjoern A. Zeeb 	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
1061*cbb3ec25SBjoern A. Zeeb 
1062*cbb3ec25SBjoern A. Zeeb out:
1063*cbb3ec25SBjoern A. Zeeb 	t->skb = NULL;
1064*cbb3ec25SBjoern A. Zeeb 	mt76_put_txwi(mdev, t);
1065*cbb3ec25SBjoern A. Zeeb }
1066*cbb3ec25SBjoern A. Zeeb 
/* Parse a firmware tx-free event: walk the list of released MSDU
 * tokens (grouped by wcid pairs), complete the corresponding skbs,
 * queue their stations for polling, and unblock the tx path if the
 * token pool was exhausted.
 */
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
#if defined(__linux__)
	void *end = data + len;
#elif defined(__FreeBSD__)
	void *end = (u8 *)data + len;
#endif
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	/* only event format version >= 4 is supported here */
	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		/* guard against a malformed event running past the buffer */
		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			/* switch the station context for the following
			 * msdu entries and schedule it for stats polling
			 */
			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&msta->wcid.poll_list))
				list_add_tail(&msta->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER)
			continue;

		/* each remaining word packs up to two msdu token ids;
		 * an all-ones field means "no token in this slot"
		 */
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	/* resume tx if it was blocked on token exhaustion */
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
1161*cbb3ec25SBjoern A. Zeeb 
1162*cbb3ec25SBjoern A. Zeeb static bool
1163*cbb3ec25SBjoern A. Zeeb mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
1164*cbb3ec25SBjoern A. Zeeb 		       int pid, __le32 *txs_data)
1165*cbb3ec25SBjoern A. Zeeb {
1166*cbb3ec25SBjoern A. Zeeb 	struct mt76_sta_stats *stats = &wcid->stats;
1167*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_supported_band *sband;
1168*cbb3ec25SBjoern A. Zeeb 	struct mt76_dev *mdev = &dev->mt76;
1169*cbb3ec25SBjoern A. Zeeb 	struct mt76_phy *mphy;
1170*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_tx_info *info;
1171*cbb3ec25SBjoern A. Zeeb 	struct sk_buff_head list;
1172*cbb3ec25SBjoern A. Zeeb 	struct rate_info rate = {};
1173*cbb3ec25SBjoern A. Zeeb 	struct sk_buff *skb;
1174*cbb3ec25SBjoern A. Zeeb 	bool cck = false;
1175*cbb3ec25SBjoern A. Zeeb 	u32 txrate, txs, mode, stbc;
1176*cbb3ec25SBjoern A. Zeeb 
1177*cbb3ec25SBjoern A. Zeeb 	mt76_tx_status_lock(mdev, &list);
1178*cbb3ec25SBjoern A. Zeeb 	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
1179*cbb3ec25SBjoern A. Zeeb 	if (!skb)
1180*cbb3ec25SBjoern A. Zeeb 		goto out_no_skb;
1181*cbb3ec25SBjoern A. Zeeb 
1182*cbb3ec25SBjoern A. Zeeb 	txs = le32_to_cpu(txs_data[0]);
1183*cbb3ec25SBjoern A. Zeeb 
1184*cbb3ec25SBjoern A. Zeeb 	info = IEEE80211_SKB_CB(skb);
1185*cbb3ec25SBjoern A. Zeeb 	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
1186*cbb3ec25SBjoern A. Zeeb 		info->flags |= IEEE80211_TX_STAT_ACK;
1187*cbb3ec25SBjoern A. Zeeb 
1188*cbb3ec25SBjoern A. Zeeb 	info->status.ampdu_len = 1;
1189*cbb3ec25SBjoern A. Zeeb 	info->status.ampdu_ack_len = !!(info->flags &
1190*cbb3ec25SBjoern A. Zeeb 					IEEE80211_TX_STAT_ACK);
1191*cbb3ec25SBjoern A. Zeeb 
1192*cbb3ec25SBjoern A. Zeeb 	info->status.rates[0].idx = -1;
1193*cbb3ec25SBjoern A. Zeeb 
1194*cbb3ec25SBjoern A. Zeeb 	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
1195*cbb3ec25SBjoern A. Zeeb 
1196*cbb3ec25SBjoern A. Zeeb 	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
1197*cbb3ec25SBjoern A. Zeeb 	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
1198*cbb3ec25SBjoern A. Zeeb 	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);
1199*cbb3ec25SBjoern A. Zeeb 
1200*cbb3ec25SBjoern A. Zeeb 	if (stbc && rate.nss > 1)
1201*cbb3ec25SBjoern A. Zeeb 		rate.nss >>= 1;
1202*cbb3ec25SBjoern A. Zeeb 
1203*cbb3ec25SBjoern A. Zeeb 	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
1204*cbb3ec25SBjoern A. Zeeb 		stats->tx_nss[rate.nss - 1]++;
1205*cbb3ec25SBjoern A. Zeeb 	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
1206*cbb3ec25SBjoern A. Zeeb 		stats->tx_mcs[rate.mcs]++;
1207*cbb3ec25SBjoern A. Zeeb 
1208*cbb3ec25SBjoern A. Zeeb 	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
1209*cbb3ec25SBjoern A. Zeeb 	switch (mode) {
1210*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_CCK:
1211*cbb3ec25SBjoern A. Zeeb 		cck = true;
1212*cbb3ec25SBjoern A. Zeeb 		fallthrough;
1213*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_OFDM:
1214*cbb3ec25SBjoern A. Zeeb 		mphy = mt76_dev_phy(mdev, wcid->phy_idx);
1215*cbb3ec25SBjoern A. Zeeb 
1216*cbb3ec25SBjoern A. Zeeb 		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
1217*cbb3ec25SBjoern A. Zeeb 			sband = &mphy->sband_5g.sband;
1218*cbb3ec25SBjoern A. Zeeb 		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
1219*cbb3ec25SBjoern A. Zeeb 			sband = &mphy->sband_6g.sband;
1220*cbb3ec25SBjoern A. Zeeb 		else
1221*cbb3ec25SBjoern A. Zeeb 			sband = &mphy->sband_2g.sband;
1222*cbb3ec25SBjoern A. Zeeb 
1223*cbb3ec25SBjoern A. Zeeb 		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
1224*cbb3ec25SBjoern A. Zeeb 		rate.legacy = sband->bitrates[rate.mcs].bitrate;
1225*cbb3ec25SBjoern A. Zeeb 		break;
1226*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_HT:
1227*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_HT_GF:
1228*cbb3ec25SBjoern A. Zeeb 		if (rate.mcs > 31)
1229*cbb3ec25SBjoern A. Zeeb 			goto out;
1230*cbb3ec25SBjoern A. Zeeb 
1231*cbb3ec25SBjoern A. Zeeb 		rate.flags = RATE_INFO_FLAGS_MCS;
1232*cbb3ec25SBjoern A. Zeeb 		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
1233*cbb3ec25SBjoern A. Zeeb 			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1234*cbb3ec25SBjoern A. Zeeb 		break;
1235*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_VHT:
1236*cbb3ec25SBjoern A. Zeeb 		if (rate.mcs > 9)
1237*cbb3ec25SBjoern A. Zeeb 			goto out;
1238*cbb3ec25SBjoern A. Zeeb 
1239*cbb3ec25SBjoern A. Zeeb 		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
1240*cbb3ec25SBjoern A. Zeeb 		break;
1241*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_HE_SU:
1242*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_HE_EXT_SU:
1243*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_HE_TB:
1244*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_HE_MU:
1245*cbb3ec25SBjoern A. Zeeb 		if (rate.mcs > 11)
1246*cbb3ec25SBjoern A. Zeeb 			goto out;
1247*cbb3ec25SBjoern A. Zeeb 
1248*cbb3ec25SBjoern A. Zeeb 		rate.he_gi = wcid->rate.he_gi;
1249*cbb3ec25SBjoern A. Zeeb 		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
1250*cbb3ec25SBjoern A. Zeeb 		rate.flags = RATE_INFO_FLAGS_HE_MCS;
1251*cbb3ec25SBjoern A. Zeeb 		break;
1252*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_EHT_SU:
1253*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_EHT_TRIG:
1254*cbb3ec25SBjoern A. Zeeb 	case MT_PHY_TYPE_EHT_MU:
1255*cbb3ec25SBjoern A. Zeeb 		if (rate.mcs > 13)
1256*cbb3ec25SBjoern A. Zeeb 			goto out;
1257*cbb3ec25SBjoern A. Zeeb 
1258*cbb3ec25SBjoern A. Zeeb 		rate.eht_gi = wcid->rate.eht_gi;
1259*cbb3ec25SBjoern A. Zeeb 		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
1260*cbb3ec25SBjoern A. Zeeb 		break;
1261*cbb3ec25SBjoern A. Zeeb 	default:
1262*cbb3ec25SBjoern A. Zeeb 		goto out;
1263*cbb3ec25SBjoern A. Zeeb 	}
1264*cbb3ec25SBjoern A. Zeeb 
1265*cbb3ec25SBjoern A. Zeeb 	stats->tx_mode[mode]++;
1266*cbb3ec25SBjoern A. Zeeb 
1267*cbb3ec25SBjoern A. Zeeb 	switch (FIELD_GET(MT_TXS0_BW, txs)) {
1268*cbb3ec25SBjoern A. Zeeb 	case IEEE80211_STA_RX_BW_320:
1269*cbb3ec25SBjoern A. Zeeb 		rate.bw = RATE_INFO_BW_320;
1270*cbb3ec25SBjoern A. Zeeb 		stats->tx_bw[4]++;
1271*cbb3ec25SBjoern A. Zeeb 		break;
1272*cbb3ec25SBjoern A. Zeeb 	case IEEE80211_STA_RX_BW_160:
1273*cbb3ec25SBjoern A. Zeeb 		rate.bw = RATE_INFO_BW_160;
1274*cbb3ec25SBjoern A. Zeeb 		stats->tx_bw[3]++;
1275*cbb3ec25SBjoern A. Zeeb 		break;
1276*cbb3ec25SBjoern A. Zeeb 	case IEEE80211_STA_RX_BW_80:
1277*cbb3ec25SBjoern A. Zeeb 		rate.bw = RATE_INFO_BW_80;
1278*cbb3ec25SBjoern A. Zeeb 		stats->tx_bw[2]++;
1279*cbb3ec25SBjoern A. Zeeb 		break;
1280*cbb3ec25SBjoern A. Zeeb 	case IEEE80211_STA_RX_BW_40:
1281*cbb3ec25SBjoern A. Zeeb 		rate.bw = RATE_INFO_BW_40;
1282*cbb3ec25SBjoern A. Zeeb 		stats->tx_bw[1]++;
1283*cbb3ec25SBjoern A. Zeeb 		break;
1284*cbb3ec25SBjoern A. Zeeb 	default:
1285*cbb3ec25SBjoern A. Zeeb 		rate.bw = RATE_INFO_BW_20;
1286*cbb3ec25SBjoern A. Zeeb 		stats->tx_bw[0]++;
1287*cbb3ec25SBjoern A. Zeeb 		break;
1288*cbb3ec25SBjoern A. Zeeb 	}
1289*cbb3ec25SBjoern A. Zeeb 	wcid->rate = rate;
1290*cbb3ec25SBjoern A. Zeeb 
1291*cbb3ec25SBjoern A. Zeeb out:
1292*cbb3ec25SBjoern A. Zeeb 	mt76_tx_status_skb_done(mdev, skb, &list);
1293*cbb3ec25SBjoern A. Zeeb 
1294*cbb3ec25SBjoern A. Zeeb out_no_skb:
1295*cbb3ec25SBjoern A. Zeeb 	mt76_tx_status_unlock(mdev, &list);
1296*cbb3ec25SBjoern A. Zeeb 
1297*cbb3ec25SBjoern A. Zeeb 	return !!skb;
1298*cbb3ec25SBjoern A. Zeeb }
1299*cbb3ec25SBjoern A. Zeeb 
1300*cbb3ec25SBjoern A. Zeeb static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
1301*cbb3ec25SBjoern A. Zeeb {
1302*cbb3ec25SBjoern A. Zeeb 	struct mt7996_sta *msta = NULL;
1303*cbb3ec25SBjoern A. Zeeb 	struct mt76_wcid *wcid;
1304*cbb3ec25SBjoern A. Zeeb 	__le32 *txs_data = data;
1305*cbb3ec25SBjoern A. Zeeb 	u16 wcidx;
1306*cbb3ec25SBjoern A. Zeeb 	u8 pid;
1307*cbb3ec25SBjoern A. Zeeb 
1308*cbb3ec25SBjoern A. Zeeb 	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
1309*cbb3ec25SBjoern A. Zeeb 		return;
1310*cbb3ec25SBjoern A. Zeeb 
1311*cbb3ec25SBjoern A. Zeeb 	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1312*cbb3ec25SBjoern A. Zeeb 	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1313*cbb3ec25SBjoern A. Zeeb 
1314*cbb3ec25SBjoern A. Zeeb 	if (pid < MT_PACKET_ID_FIRST)
1315*cbb3ec25SBjoern A. Zeeb 		return;
1316*cbb3ec25SBjoern A. Zeeb 
1317*cbb3ec25SBjoern A. Zeeb 	if (wcidx >= mt7996_wtbl_size(dev))
1318*cbb3ec25SBjoern A. Zeeb 		return;
1319*cbb3ec25SBjoern A. Zeeb 
1320*cbb3ec25SBjoern A. Zeeb 	rcu_read_lock();
1321*cbb3ec25SBjoern A. Zeeb 
1322*cbb3ec25SBjoern A. Zeeb 	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1323*cbb3ec25SBjoern A. Zeeb 	if (!wcid)
1324*cbb3ec25SBjoern A. Zeeb 		goto out;
1325*cbb3ec25SBjoern A. Zeeb 
1326*cbb3ec25SBjoern A. Zeeb 	msta = container_of(wcid, struct mt7996_sta, wcid);
1327*cbb3ec25SBjoern A. Zeeb 
1328*cbb3ec25SBjoern A. Zeeb 	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);
1329*cbb3ec25SBjoern A. Zeeb 
1330*cbb3ec25SBjoern A. Zeeb 	if (!wcid->sta)
1331*cbb3ec25SBjoern A. Zeeb 		goto out;
1332*cbb3ec25SBjoern A. Zeeb 
1333*cbb3ec25SBjoern A. Zeeb 	spin_lock_bh(&dev->mt76.sta_poll_lock);
1334*cbb3ec25SBjoern A. Zeeb 	if (list_empty(&msta->wcid.poll_list))
1335*cbb3ec25SBjoern A. Zeeb 		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
1336*cbb3ec25SBjoern A. Zeeb 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1337*cbb3ec25SBjoern A. Zeeb 
1338*cbb3ec25SBjoern A. Zeeb out:
1339*cbb3ec25SBjoern A. Zeeb 	rcu_read_unlock();
1340*cbb3ec25SBjoern A. Zeeb }
1341*cbb3ec25SBjoern A. Zeeb 
1342*cbb3ec25SBjoern A. Zeeb bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
1343*cbb3ec25SBjoern A. Zeeb {
1344*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1345*cbb3ec25SBjoern A. Zeeb 	__le32 *rxd = (__le32 *)data;
1346*cbb3ec25SBjoern A. Zeeb 	__le32 *end = (__le32 *)&rxd[len / 4];
1347*cbb3ec25SBjoern A. Zeeb 	enum rx_pkt_type type;
1348*cbb3ec25SBjoern A. Zeeb 
1349*cbb3ec25SBjoern A. Zeeb 	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1350*cbb3ec25SBjoern A. Zeeb 	if (type != PKT_TYPE_NORMAL) {
1351*cbb3ec25SBjoern A. Zeeb 		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
1352*cbb3ec25SBjoern A. Zeeb 
1353*cbb3ec25SBjoern A. Zeeb 		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
1354*cbb3ec25SBjoern A. Zeeb 			     MT_RXD0_SW_PKT_TYPE_FRAME))
1355*cbb3ec25SBjoern A. Zeeb 			return true;
1356*cbb3ec25SBjoern A. Zeeb 	}
1357*cbb3ec25SBjoern A. Zeeb 
1358*cbb3ec25SBjoern A. Zeeb 	switch (type) {
1359*cbb3ec25SBjoern A. Zeeb 	case PKT_TYPE_TXRX_NOTIFY:
1360*cbb3ec25SBjoern A. Zeeb 		mt7996_mac_tx_free(dev, data, len);
1361*cbb3ec25SBjoern A. Zeeb 		return false;
1362*cbb3ec25SBjoern A. Zeeb 	case PKT_TYPE_TXS:
1363*cbb3ec25SBjoern A. Zeeb 		for (rxd += 4; rxd + 8 <= end; rxd += 8)
1364*cbb3ec25SBjoern A. Zeeb 			mt7996_mac_add_txs(dev, rxd);
1365*cbb3ec25SBjoern A. Zeeb 		return false;
1366*cbb3ec25SBjoern A. Zeeb 	case PKT_TYPE_RX_FW_MONITOR:
1367*cbb3ec25SBjoern A. Zeeb 		mt7996_debugfs_rx_fw_monitor(dev, data, len);
1368*cbb3ec25SBjoern A. Zeeb 		return false;
1369*cbb3ec25SBjoern A. Zeeb 	default:
1370*cbb3ec25SBjoern A. Zeeb 		return true;
1371*cbb3ec25SBjoern A. Zeeb 	}
1372*cbb3ec25SBjoern A. Zeeb }
1373*cbb3ec25SBjoern A. Zeeb 
1374*cbb3ec25SBjoern A. Zeeb void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1375*cbb3ec25SBjoern A. Zeeb 			 struct sk_buff *skb, u32 *info)
1376*cbb3ec25SBjoern A. Zeeb {
1377*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1378*cbb3ec25SBjoern A. Zeeb 	__le32 *rxd = (__le32 *)skb->data;
1379*cbb3ec25SBjoern A. Zeeb 	__le32 *end = (__le32 *)&skb->data[skb->len];
1380*cbb3ec25SBjoern A. Zeeb 	enum rx_pkt_type type;
1381*cbb3ec25SBjoern A. Zeeb 
1382*cbb3ec25SBjoern A. Zeeb 	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1383*cbb3ec25SBjoern A. Zeeb 	if (type != PKT_TYPE_NORMAL) {
1384*cbb3ec25SBjoern A. Zeeb 		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
1385*cbb3ec25SBjoern A. Zeeb 
1386*cbb3ec25SBjoern A. Zeeb 		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
1387*cbb3ec25SBjoern A. Zeeb 			     MT_RXD0_SW_PKT_TYPE_FRAME))
1388*cbb3ec25SBjoern A. Zeeb 			type = PKT_TYPE_NORMAL;
1389*cbb3ec25SBjoern A. Zeeb 	}
1390*cbb3ec25SBjoern A. Zeeb 
1391*cbb3ec25SBjoern A. Zeeb 	switch (type) {
1392*cbb3ec25SBjoern A. Zeeb 	case PKT_TYPE_TXRX_NOTIFY:
1393*cbb3ec25SBjoern A. Zeeb 		mt7996_mac_tx_free(dev, skb->data, skb->len);
1394*cbb3ec25SBjoern A. Zeeb 		napi_consume_skb(skb, 1);
1395*cbb3ec25SBjoern A. Zeeb 		break;
1396*cbb3ec25SBjoern A. Zeeb 	case PKT_TYPE_RX_EVENT:
1397*cbb3ec25SBjoern A. Zeeb 		mt7996_mcu_rx_event(dev, skb);
1398*cbb3ec25SBjoern A. Zeeb 		break;
1399*cbb3ec25SBjoern A. Zeeb 	case PKT_TYPE_TXS:
1400*cbb3ec25SBjoern A. Zeeb 		for (rxd += 4; rxd + 8 <= end; rxd += 8)
1401*cbb3ec25SBjoern A. Zeeb 			mt7996_mac_add_txs(dev, rxd);
1402*cbb3ec25SBjoern A. Zeeb 		dev_kfree_skb(skb);
1403*cbb3ec25SBjoern A. Zeeb 		break;
1404*cbb3ec25SBjoern A. Zeeb 	case PKT_TYPE_RX_FW_MONITOR:
1405*cbb3ec25SBjoern A. Zeeb 		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
1406*cbb3ec25SBjoern A. Zeeb 		dev_kfree_skb(skb);
1407*cbb3ec25SBjoern A. Zeeb 		break;
1408*cbb3ec25SBjoern A. Zeeb 	case PKT_TYPE_NORMAL:
1409*cbb3ec25SBjoern A. Zeeb 		if (!mt7996_mac_fill_rx(dev, skb)) {
1410*cbb3ec25SBjoern A. Zeeb 			mt76_rx(&dev->mt76, q, skb);
1411*cbb3ec25SBjoern A. Zeeb 			return;
1412*cbb3ec25SBjoern A. Zeeb 		}
1413*cbb3ec25SBjoern A. Zeeb 		fallthrough;
1414*cbb3ec25SBjoern A. Zeeb 	default:
1415*cbb3ec25SBjoern A. Zeeb 		dev_kfree_skb(skb);
1416*cbb3ec25SBjoern A. Zeeb 		break;
1417*cbb3ec25SBjoern A. Zeeb 	}
1418*cbb3ec25SBjoern A. Zeeb }
1419*cbb3ec25SBjoern A. Zeeb 
1420*cbb3ec25SBjoern A. Zeeb void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
1421*cbb3ec25SBjoern A. Zeeb {
1422*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
1423*cbb3ec25SBjoern A. Zeeb 	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);
1424*cbb3ec25SBjoern A. Zeeb 
1425*cbb3ec25SBjoern A. Zeeb 	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
1426*cbb3ec25SBjoern A. Zeeb 	mt76_set(dev, reg, BIT(11) | BIT(9));
1427*cbb3ec25SBjoern A. Zeeb }
1428*cbb3ec25SBjoern A. Zeeb 
1429*cbb3ec25SBjoern A. Zeeb void mt7996_mac_reset_counters(struct mt7996_phy *phy)
1430*cbb3ec25SBjoern A. Zeeb {
1431*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
1432*cbb3ec25SBjoern A. Zeeb 	u8 band_idx = phy->mt76->band_idx;
1433*cbb3ec25SBjoern A. Zeeb 	int i;
1434*cbb3ec25SBjoern A. Zeeb 
1435*cbb3ec25SBjoern A. Zeeb 	for (i = 0; i < 16; i++)
1436*cbb3ec25SBjoern A. Zeeb 		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
1437*cbb3ec25SBjoern A. Zeeb 
1438*cbb3ec25SBjoern A. Zeeb 	phy->mt76->survey_time = ktime_get_boottime();
1439*cbb3ec25SBjoern A. Zeeb 
1440*cbb3ec25SBjoern A. Zeeb 	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
1441*cbb3ec25SBjoern A. Zeeb 
1442*cbb3ec25SBjoern A. Zeeb 	/* reset airtime counters */
1443*cbb3ec25SBjoern A. Zeeb 	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
1444*cbb3ec25SBjoern A. Zeeb 		 MT_WF_RMAC_MIB_RXTIME_CLR);
1445*cbb3ec25SBjoern A. Zeeb 
1446*cbb3ec25SBjoern A. Zeeb 	mt7996_mcu_get_chan_mib_info(phy, true);
1447*cbb3ec25SBjoern A. Zeeb }
1448*cbb3ec25SBjoern A. Zeeb 
1449*cbb3ec25SBjoern A. Zeeb void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
1450*cbb3ec25SBjoern A. Zeeb {
1451*cbb3ec25SBjoern A. Zeeb 	s16 coverage_class = phy->coverage_class;
1452*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
1453*cbb3ec25SBjoern A. Zeeb 	struct mt7996_phy *phy2 = mt7996_phy2(dev);
1454*cbb3ec25SBjoern A. Zeeb 	struct mt7996_phy *phy3 = mt7996_phy3(dev);
1455*cbb3ec25SBjoern A. Zeeb 	u32 reg_offset;
1456*cbb3ec25SBjoern A. Zeeb 	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
1457*cbb3ec25SBjoern A. Zeeb 		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
1458*cbb3ec25SBjoern A. Zeeb 	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
1459*cbb3ec25SBjoern A. Zeeb 		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
1460*cbb3ec25SBjoern A. Zeeb 	u8 band_idx = phy->mt76->band_idx;
1461*cbb3ec25SBjoern A. Zeeb 	int offset;
1462*cbb3ec25SBjoern A. Zeeb 
1463*cbb3ec25SBjoern A. Zeeb 	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
1464*cbb3ec25SBjoern A. Zeeb 		return;
1465*cbb3ec25SBjoern A. Zeeb 
1466*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1467*cbb3ec25SBjoern A. Zeeb 		coverage_class = max_t(s16, dev->phy.coverage_class,
1468*cbb3ec25SBjoern A. Zeeb 				       phy2->coverage_class);
1469*cbb3ec25SBjoern A. Zeeb 
1470*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1471*cbb3ec25SBjoern A. Zeeb 		coverage_class = max_t(s16, coverage_class,
1472*cbb3ec25SBjoern A. Zeeb 				       phy3->coverage_class);
1473*cbb3ec25SBjoern A. Zeeb 
1474*cbb3ec25SBjoern A. Zeeb 	offset = 3 * coverage_class;
1475*cbb3ec25SBjoern A. Zeeb 	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
1476*cbb3ec25SBjoern A. Zeeb 		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
1477*cbb3ec25SBjoern A. Zeeb 
1478*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
1479*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
1480*cbb3ec25SBjoern A. Zeeb }
1481*cbb3ec25SBjoern A. Zeeb 
1482*cbb3ec25SBjoern A. Zeeb void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
1483*cbb3ec25SBjoern A. Zeeb {
1484*cbb3ec25SBjoern A. Zeeb 	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
1485*cbb3ec25SBjoern A. Zeeb 		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
1486*cbb3ec25SBjoern A. Zeeb 		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);
1487*cbb3ec25SBjoern A. Zeeb 
1488*cbb3ec25SBjoern A. Zeeb 	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
1489*cbb3ec25SBjoern A. Zeeb 		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
1490*cbb3ec25SBjoern A. Zeeb }
1491*cbb3ec25SBjoern A. Zeeb 
1492*cbb3ec25SBjoern A. Zeeb static u8
1493*cbb3ec25SBjoern A. Zeeb mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
1494*cbb3ec25SBjoern A. Zeeb {
1495*cbb3ec25SBjoern A. Zeeb 	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1496*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
1497*cbb3ec25SBjoern A. Zeeb 	u32 val, sum = 0, n = 0;
1498*cbb3ec25SBjoern A. Zeeb 	int ant, i;
1499*cbb3ec25SBjoern A. Zeeb 
1500*cbb3ec25SBjoern A. Zeeb 	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
1501*cbb3ec25SBjoern A. Zeeb 		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);
1502*cbb3ec25SBjoern A. Zeeb 
1503*cbb3ec25SBjoern A. Zeeb 		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1504*cbb3ec25SBjoern A. Zeeb 			val = mt76_rr(dev, reg);
1505*cbb3ec25SBjoern A. Zeeb 			sum += val * nf_power[i];
1506*cbb3ec25SBjoern A. Zeeb 			n += val;
1507*cbb3ec25SBjoern A. Zeeb 		}
1508*cbb3ec25SBjoern A. Zeeb 	}
1509*cbb3ec25SBjoern A. Zeeb 
1510*cbb3ec25SBjoern A. Zeeb 	return n ? sum / n : 0;
1511*cbb3ec25SBjoern A. Zeeb }
1512*cbb3ec25SBjoern A. Zeeb 
1513*cbb3ec25SBjoern A. Zeeb void mt7996_update_channel(struct mt76_phy *mphy)
1514*cbb3ec25SBjoern A. Zeeb {
1515*cbb3ec25SBjoern A. Zeeb 	struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
1516*cbb3ec25SBjoern A. Zeeb 	struct mt76_channel_state *state = mphy->chan_state;
1517*cbb3ec25SBjoern A. Zeeb 	int nf;
1518*cbb3ec25SBjoern A. Zeeb 
1519*cbb3ec25SBjoern A. Zeeb 	mt7996_mcu_get_chan_mib_info(phy, false);
1520*cbb3ec25SBjoern A. Zeeb 
1521*cbb3ec25SBjoern A. Zeeb 	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
1522*cbb3ec25SBjoern A. Zeeb 	if (!phy->noise)
1523*cbb3ec25SBjoern A. Zeeb 		phy->noise = nf << 4;
1524*cbb3ec25SBjoern A. Zeeb 	else if (nf)
1525*cbb3ec25SBjoern A. Zeeb 		phy->noise += nf - (phy->noise >> 4);
1526*cbb3ec25SBjoern A. Zeeb 
1527*cbb3ec25SBjoern A. Zeeb 	state->noise = -(phy->noise >> 4);
1528*cbb3ec25SBjoern A. Zeeb }
1529*cbb3ec25SBjoern A. Zeeb 
1530*cbb3ec25SBjoern A. Zeeb static bool
1531*cbb3ec25SBjoern A. Zeeb mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
1532*cbb3ec25SBjoern A. Zeeb {
1533*cbb3ec25SBjoern A. Zeeb 	bool ret;
1534*cbb3ec25SBjoern A. Zeeb 
1535*cbb3ec25SBjoern A. Zeeb 	ret = wait_event_timeout(dev->reset_wait,
1536*cbb3ec25SBjoern A. Zeeb 				 (READ_ONCE(dev->recovery.state) & state),
1537*cbb3ec25SBjoern A. Zeeb 				 MT7996_RESET_TIMEOUT);
1538*cbb3ec25SBjoern A. Zeeb 
1539*cbb3ec25SBjoern A. Zeeb 	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1540*cbb3ec25SBjoern A. Zeeb 	return ret;
1541*cbb3ec25SBjoern A. Zeeb }
1542*cbb3ec25SBjoern A. Zeeb 
1543*cbb3ec25SBjoern A. Zeeb static void
1544*cbb3ec25SBjoern A. Zeeb mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1545*cbb3ec25SBjoern A. Zeeb {
1546*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_hw *hw = priv;
1547*cbb3ec25SBjoern A. Zeeb 
1548*cbb3ec25SBjoern A. Zeeb 	switch (vif->type) {
1549*cbb3ec25SBjoern A. Zeeb 	case NL80211_IFTYPE_MESH_POINT:
1550*cbb3ec25SBjoern A. Zeeb 	case NL80211_IFTYPE_ADHOC:
1551*cbb3ec25SBjoern A. Zeeb 	case NL80211_IFTYPE_AP:
1552*cbb3ec25SBjoern A. Zeeb 		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
1553*cbb3ec25SBjoern A. Zeeb 		break;
1554*cbb3ec25SBjoern A. Zeeb 	default:
1555*cbb3ec25SBjoern A. Zeeb 		break;
1556*cbb3ec25SBjoern A. Zeeb 	}
1557*cbb3ec25SBjoern A. Zeeb }
1558*cbb3ec25SBjoern A. Zeeb 
1559*cbb3ec25SBjoern A. Zeeb static void
1560*cbb3ec25SBjoern A. Zeeb mt7996_update_beacons(struct mt7996_dev *dev)
1561*cbb3ec25SBjoern A. Zeeb {
1562*cbb3ec25SBjoern A. Zeeb 	struct mt76_phy *phy2, *phy3;
1563*cbb3ec25SBjoern A. Zeeb 
1564*cbb3ec25SBjoern A. Zeeb 	ieee80211_iterate_active_interfaces(dev->mt76.hw,
1565*cbb3ec25SBjoern A. Zeeb 					    IEEE80211_IFACE_ITER_RESUME_ALL,
1566*cbb3ec25SBjoern A. Zeeb 					    mt7996_update_vif_beacon, dev->mt76.hw);
1567*cbb3ec25SBjoern A. Zeeb 
1568*cbb3ec25SBjoern A. Zeeb 	phy2 = dev->mt76.phys[MT_BAND1];
1569*cbb3ec25SBjoern A. Zeeb 	if (!phy2)
1570*cbb3ec25SBjoern A. Zeeb 		return;
1571*cbb3ec25SBjoern A. Zeeb 
1572*cbb3ec25SBjoern A. Zeeb 	ieee80211_iterate_active_interfaces(phy2->hw,
1573*cbb3ec25SBjoern A. Zeeb 					    IEEE80211_IFACE_ITER_RESUME_ALL,
1574*cbb3ec25SBjoern A. Zeeb 					    mt7996_update_vif_beacon, phy2->hw);
1575*cbb3ec25SBjoern A. Zeeb 
1576*cbb3ec25SBjoern A. Zeeb 	phy3 = dev->mt76.phys[MT_BAND2];
1577*cbb3ec25SBjoern A. Zeeb 	if (!phy3)
1578*cbb3ec25SBjoern A. Zeeb 		return;
1579*cbb3ec25SBjoern A. Zeeb 
1580*cbb3ec25SBjoern A. Zeeb 	ieee80211_iterate_active_interfaces(phy3->hw,
1581*cbb3ec25SBjoern A. Zeeb 					    IEEE80211_IFACE_ITER_RESUME_ALL,
1582*cbb3ec25SBjoern A. Zeeb 					    mt7996_update_vif_beacon, phy3->hw);
1583*cbb3ec25SBjoern A. Zeeb }
1584*cbb3ec25SBjoern A. Zeeb 
1585*cbb3ec25SBjoern A. Zeeb void mt7996_tx_token_put(struct mt7996_dev *dev)
1586*cbb3ec25SBjoern A. Zeeb {
1587*cbb3ec25SBjoern A. Zeeb 	struct mt76_txwi_cache *txwi;
1588*cbb3ec25SBjoern A. Zeeb 	int id;
1589*cbb3ec25SBjoern A. Zeeb 
1590*cbb3ec25SBjoern A. Zeeb 	spin_lock_bh(&dev->mt76.token_lock);
1591*cbb3ec25SBjoern A. Zeeb 	idr_for_each_entry(&dev->mt76.token, txwi, id) {
1592*cbb3ec25SBjoern A. Zeeb 		mt7996_txwi_free(dev, txwi, NULL, NULL);
1593*cbb3ec25SBjoern A. Zeeb 		dev->mt76.token_count--;
1594*cbb3ec25SBjoern A. Zeeb 	}
1595*cbb3ec25SBjoern A. Zeeb 	spin_unlock_bh(&dev->mt76.token_lock);
1596*cbb3ec25SBjoern A. Zeeb 	idr_destroy(&dev->mt76.token);
1597*cbb3ec25SBjoern A. Zeeb }
1598*cbb3ec25SBjoern A. Zeeb 
1599*cbb3ec25SBjoern A. Zeeb static int
1600*cbb3ec25SBjoern A. Zeeb mt7996_mac_restart(struct mt7996_dev *dev)
1601*cbb3ec25SBjoern A. Zeeb {
1602*cbb3ec25SBjoern A. Zeeb 	struct mt7996_phy *phy2, *phy3;
1603*cbb3ec25SBjoern A. Zeeb 	struct mt76_dev *mdev = &dev->mt76;
1604*cbb3ec25SBjoern A. Zeeb 	int i, ret;
1605*cbb3ec25SBjoern A. Zeeb 
1606*cbb3ec25SBjoern A. Zeeb 	phy2 = mt7996_phy2(dev);
1607*cbb3ec25SBjoern A. Zeeb 	phy3 = mt7996_phy3(dev);
1608*cbb3ec25SBjoern A. Zeeb 
1609*cbb3ec25SBjoern A. Zeeb 	if (dev->hif2) {
1610*cbb3ec25SBjoern A. Zeeb 		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
1611*cbb3ec25SBjoern A. Zeeb 		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1612*cbb3ec25SBjoern A. Zeeb 	}
1613*cbb3ec25SBjoern A. Zeeb 
1614*cbb3ec25SBjoern A. Zeeb 	if (dev_is_pci(mdev->dev)) {
1615*cbb3ec25SBjoern A. Zeeb 		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
1616*cbb3ec25SBjoern A. Zeeb 		if (dev->hif2)
1617*cbb3ec25SBjoern A. Zeeb 			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
1618*cbb3ec25SBjoern A. Zeeb 	}
1619*cbb3ec25SBjoern A. Zeeb 
1620*cbb3ec25SBjoern A. Zeeb 	set_bit(MT76_RESET, &dev->mphy.state);
1621*cbb3ec25SBjoern A. Zeeb 	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1622*cbb3ec25SBjoern A. Zeeb 	wake_up(&dev->mt76.mcu.wait);
1623*cbb3ec25SBjoern A. Zeeb 	if (phy2) {
1624*cbb3ec25SBjoern A. Zeeb 		set_bit(MT76_RESET, &phy2->mt76->state);
1625*cbb3ec25SBjoern A. Zeeb 		set_bit(MT76_MCU_RESET, &phy2->mt76->state);
1626*cbb3ec25SBjoern A. Zeeb 	}
1627*cbb3ec25SBjoern A. Zeeb 	if (phy3) {
1628*cbb3ec25SBjoern A. Zeeb 		set_bit(MT76_RESET, &phy3->mt76->state);
1629*cbb3ec25SBjoern A. Zeeb 		set_bit(MT76_MCU_RESET, &phy3->mt76->state);
1630*cbb3ec25SBjoern A. Zeeb 	}
1631*cbb3ec25SBjoern A. Zeeb 
1632*cbb3ec25SBjoern A. Zeeb 	/* lock/unlock all queues to ensure that no tx is pending */
1633*cbb3ec25SBjoern A. Zeeb 	mt76_txq_schedule_all(&dev->mphy);
1634*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1635*cbb3ec25SBjoern A. Zeeb 		mt76_txq_schedule_all(phy2->mt76);
1636*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1637*cbb3ec25SBjoern A. Zeeb 		mt76_txq_schedule_all(phy3->mt76);
1638*cbb3ec25SBjoern A. Zeeb 
1639*cbb3ec25SBjoern A. Zeeb 	/* disable all tx/rx napi */
1640*cbb3ec25SBjoern A. Zeeb 	mt76_worker_disable(&dev->mt76.tx_worker);
1641*cbb3ec25SBjoern A. Zeeb 	mt76_for_each_q_rx(mdev, i) {
1642*cbb3ec25SBjoern A. Zeeb 		if (mdev->q_rx[i].ndesc)
1643*cbb3ec25SBjoern A. Zeeb 			napi_disable(&dev->mt76.napi[i]);
1644*cbb3ec25SBjoern A. Zeeb 	}
1645*cbb3ec25SBjoern A. Zeeb 	napi_disable(&dev->mt76.tx_napi);
1646*cbb3ec25SBjoern A. Zeeb 
1647*cbb3ec25SBjoern A. Zeeb 	/* token reinit */
1648*cbb3ec25SBjoern A. Zeeb 	mt7996_tx_token_put(dev);
1649*cbb3ec25SBjoern A. Zeeb 	idr_init(&dev->mt76.token);
1650*cbb3ec25SBjoern A. Zeeb 
1651*cbb3ec25SBjoern A. Zeeb 	mt7996_dma_reset(dev, true);
1652*cbb3ec25SBjoern A. Zeeb 
1653*cbb3ec25SBjoern A. Zeeb 	local_bh_disable();
1654*cbb3ec25SBjoern A. Zeeb 	mt76_for_each_q_rx(mdev, i) {
1655*cbb3ec25SBjoern A. Zeeb 		if (mdev->q_rx[i].ndesc) {
1656*cbb3ec25SBjoern A. Zeeb 			napi_enable(&dev->mt76.napi[i]);
1657*cbb3ec25SBjoern A. Zeeb 			napi_schedule(&dev->mt76.napi[i]);
1658*cbb3ec25SBjoern A. Zeeb 		}
1659*cbb3ec25SBjoern A. Zeeb 	}
1660*cbb3ec25SBjoern A. Zeeb 	local_bh_enable();
1661*cbb3ec25SBjoern A. Zeeb 	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1662*cbb3ec25SBjoern A. Zeeb 	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
1663*cbb3ec25SBjoern A. Zeeb 
1664*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
1665*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
1666*cbb3ec25SBjoern A. Zeeb 	if (dev->hif2) {
1667*cbb3ec25SBjoern A. Zeeb 		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
1668*cbb3ec25SBjoern A. Zeeb 		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1669*cbb3ec25SBjoern A. Zeeb 	}
1670*cbb3ec25SBjoern A. Zeeb 	if (dev_is_pci(mdev->dev)) {
1671*cbb3ec25SBjoern A. Zeeb 		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
1672*cbb3ec25SBjoern A. Zeeb 		if (dev->hif2)
1673*cbb3ec25SBjoern A. Zeeb 			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
1674*cbb3ec25SBjoern A. Zeeb 	}
1675*cbb3ec25SBjoern A. Zeeb 
1676*cbb3ec25SBjoern A. Zeeb 	/* load firmware */
1677*cbb3ec25SBjoern A. Zeeb 	ret = mt7996_mcu_init_firmware(dev);
1678*cbb3ec25SBjoern A. Zeeb 	if (ret)
1679*cbb3ec25SBjoern A. Zeeb 		goto out;
1680*cbb3ec25SBjoern A. Zeeb 
1681*cbb3ec25SBjoern A. Zeeb 	/* set the necessary init items */
1682*cbb3ec25SBjoern A. Zeeb 	ret = mt7996_mcu_set_eeprom(dev);
1683*cbb3ec25SBjoern A. Zeeb 	if (ret)
1684*cbb3ec25SBjoern A. Zeeb 		goto out;
1685*cbb3ec25SBjoern A. Zeeb 
1686*cbb3ec25SBjoern A. Zeeb 	mt7996_mac_init(dev);
1687*cbb3ec25SBjoern A. Zeeb 	mt7996_init_txpower(dev, &dev->mphy.sband_2g.sband);
1688*cbb3ec25SBjoern A. Zeeb 	mt7996_init_txpower(dev, &dev->mphy.sband_5g.sband);
1689*cbb3ec25SBjoern A. Zeeb 	mt7996_init_txpower(dev, &dev->mphy.sband_6g.sband);
1690*cbb3ec25SBjoern A. Zeeb 	ret = mt7996_txbf_init(dev);
1691*cbb3ec25SBjoern A. Zeeb 
1692*cbb3ec25SBjoern A. Zeeb 	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
1693*cbb3ec25SBjoern A. Zeeb 		ret = mt7996_run(dev->mphy.hw);
1694*cbb3ec25SBjoern A. Zeeb 		if (ret)
1695*cbb3ec25SBjoern A. Zeeb 			goto out;
1696*cbb3ec25SBjoern A. Zeeb 	}
1697*cbb3ec25SBjoern A. Zeeb 
1698*cbb3ec25SBjoern A. Zeeb 	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
1699*cbb3ec25SBjoern A. Zeeb 		ret = mt7996_run(phy2->mt76->hw);
1700*cbb3ec25SBjoern A. Zeeb 		if (ret)
1701*cbb3ec25SBjoern A. Zeeb 			goto out;
1702*cbb3ec25SBjoern A. Zeeb 	}
1703*cbb3ec25SBjoern A. Zeeb 
1704*cbb3ec25SBjoern A. Zeeb 	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
1705*cbb3ec25SBjoern A. Zeeb 		ret = mt7996_run(phy3->mt76->hw);
1706*cbb3ec25SBjoern A. Zeeb 		if (ret)
1707*cbb3ec25SBjoern A. Zeeb 			goto out;
1708*cbb3ec25SBjoern A. Zeeb 	}
1709*cbb3ec25SBjoern A. Zeeb 
1710*cbb3ec25SBjoern A. Zeeb out:
1711*cbb3ec25SBjoern A. Zeeb 	/* reset done */
1712*cbb3ec25SBjoern A. Zeeb 	clear_bit(MT76_RESET, &dev->mphy.state);
1713*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1714*cbb3ec25SBjoern A. Zeeb 		clear_bit(MT76_RESET, &phy2->mt76->state);
1715*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1716*cbb3ec25SBjoern A. Zeeb 		clear_bit(MT76_RESET, &phy3->mt76->state);
1717*cbb3ec25SBjoern A. Zeeb 
1718*cbb3ec25SBjoern A. Zeeb 	local_bh_disable();
1719*cbb3ec25SBjoern A. Zeeb 	napi_enable(&dev->mt76.tx_napi);
1720*cbb3ec25SBjoern A. Zeeb 	napi_schedule(&dev->mt76.tx_napi);
1721*cbb3ec25SBjoern A. Zeeb 	local_bh_enable();
1722*cbb3ec25SBjoern A. Zeeb 
1723*cbb3ec25SBjoern A. Zeeb 	mt76_worker_enable(&dev->mt76.tx_worker);
1724*cbb3ec25SBjoern A. Zeeb 	return ret;
1725*cbb3ec25SBjoern A. Zeeb }
1726*cbb3ec25SBjoern A. Zeeb 
1727*cbb3ec25SBjoern A. Zeeb static void
1728*cbb3ec25SBjoern A. Zeeb mt7996_mac_full_reset(struct mt7996_dev *dev)
1729*cbb3ec25SBjoern A. Zeeb {
1730*cbb3ec25SBjoern A. Zeeb 	struct mt7996_phy *phy2, *phy3;
1731*cbb3ec25SBjoern A. Zeeb 	int i;
1732*cbb3ec25SBjoern A. Zeeb 
1733*cbb3ec25SBjoern A. Zeeb 	phy2 = mt7996_phy2(dev);
1734*cbb3ec25SBjoern A. Zeeb 	phy3 = mt7996_phy3(dev);
1735*cbb3ec25SBjoern A. Zeeb 	dev->recovery.hw_full_reset = true;
1736*cbb3ec25SBjoern A. Zeeb 
1737*cbb3ec25SBjoern A. Zeeb 	wake_up(&dev->mt76.mcu.wait);
1738*cbb3ec25SBjoern A. Zeeb 	ieee80211_stop_queues(mt76_hw(dev));
1739*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1740*cbb3ec25SBjoern A. Zeeb 		ieee80211_stop_queues(phy2->mt76->hw);
1741*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1742*cbb3ec25SBjoern A. Zeeb 		ieee80211_stop_queues(phy3->mt76->hw);
1743*cbb3ec25SBjoern A. Zeeb 
1744*cbb3ec25SBjoern A. Zeeb 	cancel_delayed_work_sync(&dev->mphy.mac_work);
1745*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1746*cbb3ec25SBjoern A. Zeeb 		cancel_delayed_work_sync(&phy2->mt76->mac_work);
1747*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1748*cbb3ec25SBjoern A. Zeeb 		cancel_delayed_work_sync(&phy3->mt76->mac_work);
1749*cbb3ec25SBjoern A. Zeeb 
1750*cbb3ec25SBjoern A. Zeeb 	mutex_lock(&dev->mt76.mutex);
1751*cbb3ec25SBjoern A. Zeeb 	for (i = 0; i < 10; i++) {
1752*cbb3ec25SBjoern A. Zeeb 		if (!mt7996_mac_restart(dev))
1753*cbb3ec25SBjoern A. Zeeb 			break;
1754*cbb3ec25SBjoern A. Zeeb 	}
1755*cbb3ec25SBjoern A. Zeeb 	mutex_unlock(&dev->mt76.mutex);
1756*cbb3ec25SBjoern A. Zeeb 
1757*cbb3ec25SBjoern A. Zeeb 	if (i == 10)
1758*cbb3ec25SBjoern A. Zeeb 		dev_err(dev->mt76.dev, "chip full reset failed\n");
1759*cbb3ec25SBjoern A. Zeeb 
1760*cbb3ec25SBjoern A. Zeeb 	ieee80211_restart_hw(mt76_hw(dev));
1761*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1762*cbb3ec25SBjoern A. Zeeb 		ieee80211_restart_hw(phy2->mt76->hw);
1763*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1764*cbb3ec25SBjoern A. Zeeb 		ieee80211_restart_hw(phy3->mt76->hw);
1765*cbb3ec25SBjoern A. Zeeb 
1766*cbb3ec25SBjoern A. Zeeb 	ieee80211_wake_queues(mt76_hw(dev));
1767*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1768*cbb3ec25SBjoern A. Zeeb 		ieee80211_wake_queues(phy2->mt76->hw);
1769*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1770*cbb3ec25SBjoern A. Zeeb 		ieee80211_wake_queues(phy3->mt76->hw);
1771*cbb3ec25SBjoern A. Zeeb 
1772*cbb3ec25SBjoern A. Zeeb 	dev->recovery.hw_full_reset = false;
1773*cbb3ec25SBjoern A. Zeeb 	ieee80211_queue_delayed_work(mt76_hw(dev),
1774*cbb3ec25SBjoern A. Zeeb 				     &dev->mphy.mac_work,
1775*cbb3ec25SBjoern A. Zeeb 				     MT7996_WATCHDOG_TIME);
1776*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1777*cbb3ec25SBjoern A. Zeeb 		ieee80211_queue_delayed_work(phy2->mt76->hw,
1778*cbb3ec25SBjoern A. Zeeb 					     &phy2->mt76->mac_work,
1779*cbb3ec25SBjoern A. Zeeb 					     MT7996_WATCHDOG_TIME);
1780*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1781*cbb3ec25SBjoern A. Zeeb 		ieee80211_queue_delayed_work(phy3->mt76->hw,
1782*cbb3ec25SBjoern A. Zeeb 					     &phy3->mt76->mac_work,
1783*cbb3ec25SBjoern A. Zeeb 					     MT7996_WATCHDOG_TIME);
1784*cbb3ec25SBjoern A. Zeeb }
1785*cbb3ec25SBjoern A. Zeeb 
1786*cbb3ec25SBjoern A. Zeeb void mt7996_mac_reset_work(struct work_struct *work)
1787*cbb3ec25SBjoern A. Zeeb {
1788*cbb3ec25SBjoern A. Zeeb 	struct mt7996_phy *phy2, *phy3;
1789*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev;
1790*cbb3ec25SBjoern A. Zeeb 	int i;
1791*cbb3ec25SBjoern A. Zeeb 
1792*cbb3ec25SBjoern A. Zeeb 	dev = container_of(work, struct mt7996_dev, reset_work);
1793*cbb3ec25SBjoern A. Zeeb 	phy2 = mt7996_phy2(dev);
1794*cbb3ec25SBjoern A. Zeeb 	phy3 = mt7996_phy3(dev);
1795*cbb3ec25SBjoern A. Zeeb 
1796*cbb3ec25SBjoern A. Zeeb 	/* chip full reset */
1797*cbb3ec25SBjoern A. Zeeb 	if (dev->recovery.restart) {
1798*cbb3ec25SBjoern A. Zeeb 		/* disable WA/WM WDT */
1799*cbb3ec25SBjoern A. Zeeb 		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
1800*cbb3ec25SBjoern A. Zeeb 			   MT_MCU_CMD_WDT_MASK);
1801*cbb3ec25SBjoern A. Zeeb 
1802*cbb3ec25SBjoern A. Zeeb 		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
1803*cbb3ec25SBjoern A. Zeeb 			dev->recovery.wa_reset_count++;
1804*cbb3ec25SBjoern A. Zeeb 		else
1805*cbb3ec25SBjoern A. Zeeb 			dev->recovery.wm_reset_count++;
1806*cbb3ec25SBjoern A. Zeeb 
1807*cbb3ec25SBjoern A. Zeeb 		mt7996_mac_full_reset(dev);
1808*cbb3ec25SBjoern A. Zeeb 
1809*cbb3ec25SBjoern A. Zeeb 		/* enable mcu irq */
1810*cbb3ec25SBjoern A. Zeeb 		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
1811*cbb3ec25SBjoern A. Zeeb 		mt7996_irq_disable(dev, 0);
1812*cbb3ec25SBjoern A. Zeeb 
1813*cbb3ec25SBjoern A. Zeeb 		/* enable WA/WM WDT */
1814*cbb3ec25SBjoern A. Zeeb 		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
1815*cbb3ec25SBjoern A. Zeeb 
1816*cbb3ec25SBjoern A. Zeeb 		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
1817*cbb3ec25SBjoern A. Zeeb 		dev->recovery.restart = false;
1818*cbb3ec25SBjoern A. Zeeb 		return;
1819*cbb3ec25SBjoern A. Zeeb 	}
1820*cbb3ec25SBjoern A. Zeeb 
1821*cbb3ec25SBjoern A. Zeeb 	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
1822*cbb3ec25SBjoern A. Zeeb 		return;
1823*cbb3ec25SBjoern A. Zeeb 
1824*cbb3ec25SBjoern A. Zeeb 	dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
1825*cbb3ec25SBjoern A. Zeeb 		 wiphy_name(dev->mt76.hw->wiphy));
1826*cbb3ec25SBjoern A. Zeeb 	ieee80211_stop_queues(mt76_hw(dev));
1827*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1828*cbb3ec25SBjoern A. Zeeb 		ieee80211_stop_queues(phy2->mt76->hw);
1829*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1830*cbb3ec25SBjoern A. Zeeb 		ieee80211_stop_queues(phy3->mt76->hw);
1831*cbb3ec25SBjoern A. Zeeb 
1832*cbb3ec25SBjoern A. Zeeb 	set_bit(MT76_RESET, &dev->mphy.state);
1833*cbb3ec25SBjoern A. Zeeb 	set_bit(MT76_MCU_RESET, &dev->mphy.state);
1834*cbb3ec25SBjoern A. Zeeb 	wake_up(&dev->mt76.mcu.wait);
1835*cbb3ec25SBjoern A. Zeeb 	cancel_delayed_work_sync(&dev->mphy.mac_work);
1836*cbb3ec25SBjoern A. Zeeb 	if (phy2) {
1837*cbb3ec25SBjoern A. Zeeb 		set_bit(MT76_RESET, &phy2->mt76->state);
1838*cbb3ec25SBjoern A. Zeeb 		cancel_delayed_work_sync(&phy2->mt76->mac_work);
1839*cbb3ec25SBjoern A. Zeeb 	}
1840*cbb3ec25SBjoern A. Zeeb 	if (phy3) {
1841*cbb3ec25SBjoern A. Zeeb 		set_bit(MT76_RESET, &phy3->mt76->state);
1842*cbb3ec25SBjoern A. Zeeb 		cancel_delayed_work_sync(&phy3->mt76->mac_work);
1843*cbb3ec25SBjoern A. Zeeb 	}
1844*cbb3ec25SBjoern A. Zeeb 	mt76_worker_disable(&dev->mt76.tx_worker);
1845*cbb3ec25SBjoern A. Zeeb 	mt76_for_each_q_rx(&dev->mt76, i)
1846*cbb3ec25SBjoern A. Zeeb 		napi_disable(&dev->mt76.napi[i]);
1847*cbb3ec25SBjoern A. Zeeb 	napi_disable(&dev->mt76.tx_napi);
1848*cbb3ec25SBjoern A. Zeeb 
1849*cbb3ec25SBjoern A. Zeeb 	mutex_lock(&dev->mt76.mutex);
1850*cbb3ec25SBjoern A. Zeeb 
1851*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
1852*cbb3ec25SBjoern A. Zeeb 
1853*cbb3ec25SBjoern A. Zeeb 	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
1854*cbb3ec25SBjoern A. Zeeb 		mt7996_dma_reset(dev, false);
1855*cbb3ec25SBjoern A. Zeeb 
1856*cbb3ec25SBjoern A. Zeeb 		mt7996_tx_token_put(dev);
1857*cbb3ec25SBjoern A. Zeeb 		idr_init(&dev->mt76.token);
1858*cbb3ec25SBjoern A. Zeeb 
1859*cbb3ec25SBjoern A. Zeeb 		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
1860*cbb3ec25SBjoern A. Zeeb 		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
1861*cbb3ec25SBjoern A. Zeeb 	}
1862*cbb3ec25SBjoern A. Zeeb 
1863*cbb3ec25SBjoern A. Zeeb 	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
1864*cbb3ec25SBjoern A. Zeeb 	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
1865*cbb3ec25SBjoern A. Zeeb 
1866*cbb3ec25SBjoern A. Zeeb 	/* enable DMA Tx/Tx and interrupt */
1867*cbb3ec25SBjoern A. Zeeb 	mt7996_dma_start(dev, false);
1868*cbb3ec25SBjoern A. Zeeb 
1869*cbb3ec25SBjoern A. Zeeb 	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1870*cbb3ec25SBjoern A. Zeeb 	clear_bit(MT76_RESET, &dev->mphy.state);
1871*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1872*cbb3ec25SBjoern A. Zeeb 		clear_bit(MT76_RESET, &phy2->mt76->state);
1873*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1874*cbb3ec25SBjoern A. Zeeb 		clear_bit(MT76_RESET, &phy3->mt76->state);
1875*cbb3ec25SBjoern A. Zeeb 
1876*cbb3ec25SBjoern A. Zeeb 	local_bh_disable();
1877*cbb3ec25SBjoern A. Zeeb 	mt76_for_each_q_rx(&dev->mt76, i) {
1878*cbb3ec25SBjoern A. Zeeb 		napi_enable(&dev->mt76.napi[i]);
1879*cbb3ec25SBjoern A. Zeeb 		napi_schedule(&dev->mt76.napi[i]);
1880*cbb3ec25SBjoern A. Zeeb 	}
1881*cbb3ec25SBjoern A. Zeeb 	local_bh_enable();
1882*cbb3ec25SBjoern A. Zeeb 
1883*cbb3ec25SBjoern A. Zeeb 	tasklet_schedule(&dev->mt76.irq_tasklet);
1884*cbb3ec25SBjoern A. Zeeb 
1885*cbb3ec25SBjoern A. Zeeb 	mt76_worker_enable(&dev->mt76.tx_worker);
1886*cbb3ec25SBjoern A. Zeeb 
1887*cbb3ec25SBjoern A. Zeeb 	local_bh_disable();
1888*cbb3ec25SBjoern A. Zeeb 	napi_enable(&dev->mt76.tx_napi);
1889*cbb3ec25SBjoern A. Zeeb 	napi_schedule(&dev->mt76.tx_napi);
1890*cbb3ec25SBjoern A. Zeeb 	local_bh_enable();
1891*cbb3ec25SBjoern A. Zeeb 
1892*cbb3ec25SBjoern A. Zeeb 	ieee80211_wake_queues(mt76_hw(dev));
1893*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1894*cbb3ec25SBjoern A. Zeeb 		ieee80211_wake_queues(phy2->mt76->hw);
1895*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1896*cbb3ec25SBjoern A. Zeeb 		ieee80211_wake_queues(phy3->mt76->hw);
1897*cbb3ec25SBjoern A. Zeeb 
1898*cbb3ec25SBjoern A. Zeeb 	mutex_unlock(&dev->mt76.mutex);
1899*cbb3ec25SBjoern A. Zeeb 
1900*cbb3ec25SBjoern A. Zeeb 	mt7996_update_beacons(dev);
1901*cbb3ec25SBjoern A. Zeeb 
1902*cbb3ec25SBjoern A. Zeeb 	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1903*cbb3ec25SBjoern A. Zeeb 				     MT7996_WATCHDOG_TIME);
1904*cbb3ec25SBjoern A. Zeeb 	if (phy2)
1905*cbb3ec25SBjoern A. Zeeb 		ieee80211_queue_delayed_work(phy2->mt76->hw,
1906*cbb3ec25SBjoern A. Zeeb 					     &phy2->mt76->mac_work,
1907*cbb3ec25SBjoern A. Zeeb 					     MT7996_WATCHDOG_TIME);
1908*cbb3ec25SBjoern A. Zeeb 	if (phy3)
1909*cbb3ec25SBjoern A. Zeeb 		ieee80211_queue_delayed_work(phy3->mt76->hw,
1910*cbb3ec25SBjoern A. Zeeb 					     &phy3->mt76->mac_work,
1911*cbb3ec25SBjoern A. Zeeb 					     MT7996_WATCHDOG_TIME);
1912*cbb3ec25SBjoern A. Zeeb 	dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
1913*cbb3ec25SBjoern A. Zeeb 		 wiphy_name(dev->mt76.hw->wiphy));
1914*cbb3ec25SBjoern A. Zeeb }
1915*cbb3ec25SBjoern A. Zeeb 
1916*cbb3ec25SBjoern A. Zeeb /* firmware coredump */
1917*cbb3ec25SBjoern A. Zeeb void mt7996_mac_dump_work(struct work_struct *work)
1918*cbb3ec25SBjoern A. Zeeb {
1919*cbb3ec25SBjoern A. Zeeb 	const struct mt7996_mem_region *mem_region;
1920*cbb3ec25SBjoern A. Zeeb 	struct mt7996_crash_data *crash_data;
1921*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev;
1922*cbb3ec25SBjoern A. Zeeb 	struct mt7996_mem_hdr *hdr;
1923*cbb3ec25SBjoern A. Zeeb 	size_t buf_len;
1924*cbb3ec25SBjoern A. Zeeb 	int i;
1925*cbb3ec25SBjoern A. Zeeb 	u32 num;
1926*cbb3ec25SBjoern A. Zeeb 	u8 *buf;
1927*cbb3ec25SBjoern A. Zeeb 
1928*cbb3ec25SBjoern A. Zeeb 	dev = container_of(work, struct mt7996_dev, dump_work);
1929*cbb3ec25SBjoern A. Zeeb 
1930*cbb3ec25SBjoern A. Zeeb 	mutex_lock(&dev->dump_mutex);
1931*cbb3ec25SBjoern A. Zeeb 
1932*cbb3ec25SBjoern A. Zeeb 	crash_data = mt7996_coredump_new(dev);
1933*cbb3ec25SBjoern A. Zeeb 	if (!crash_data) {
1934*cbb3ec25SBjoern A. Zeeb 		mutex_unlock(&dev->dump_mutex);
1935*cbb3ec25SBjoern A. Zeeb 		goto skip_coredump;
1936*cbb3ec25SBjoern A. Zeeb 	}
1937*cbb3ec25SBjoern A. Zeeb 
1938*cbb3ec25SBjoern A. Zeeb 	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
1939*cbb3ec25SBjoern A. Zeeb 	if (!mem_region || !crash_data->memdump_buf_len) {
1940*cbb3ec25SBjoern A. Zeeb 		mutex_unlock(&dev->dump_mutex);
1941*cbb3ec25SBjoern A. Zeeb 		goto skip_memdump;
1942*cbb3ec25SBjoern A. Zeeb 	}
1943*cbb3ec25SBjoern A. Zeeb 
1944*cbb3ec25SBjoern A. Zeeb 	buf = crash_data->memdump_buf;
1945*cbb3ec25SBjoern A. Zeeb 	buf_len = crash_data->memdump_buf_len;
1946*cbb3ec25SBjoern A. Zeeb 
1947*cbb3ec25SBjoern A. Zeeb 	/* dumping memory content... */
1948*cbb3ec25SBjoern A. Zeeb 	memset(buf, 0, buf_len);
1949*cbb3ec25SBjoern A. Zeeb 	for (i = 0; i < num; i++) {
1950*cbb3ec25SBjoern A. Zeeb 		if (mem_region->len > buf_len) {
1951*cbb3ec25SBjoern A. Zeeb 			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
1952*cbb3ec25SBjoern A. Zeeb 				 mem_region->name, mem_region->len);
1953*cbb3ec25SBjoern A. Zeeb 			break;
1954*cbb3ec25SBjoern A. Zeeb 		}
1955*cbb3ec25SBjoern A. Zeeb 
1956*cbb3ec25SBjoern A. Zeeb 		/* reserve space for the header */
1957*cbb3ec25SBjoern A. Zeeb 		hdr = (void *)buf;
1958*cbb3ec25SBjoern A. Zeeb 		buf += sizeof(*hdr);
1959*cbb3ec25SBjoern A. Zeeb 		buf_len -= sizeof(*hdr);
1960*cbb3ec25SBjoern A. Zeeb 
1961*cbb3ec25SBjoern A. Zeeb 		mt7996_memcpy_fromio(dev, buf, mem_region->start,
1962*cbb3ec25SBjoern A. Zeeb 				     mem_region->len);
1963*cbb3ec25SBjoern A. Zeeb 
1964*cbb3ec25SBjoern A. Zeeb 		hdr->start = mem_region->start;
1965*cbb3ec25SBjoern A. Zeeb 		hdr->len = mem_region->len;
1966*cbb3ec25SBjoern A. Zeeb 
1967*cbb3ec25SBjoern A. Zeeb 		if (!mem_region->len)
1968*cbb3ec25SBjoern A. Zeeb 			/* note: the header remains, just with zero length */
1969*cbb3ec25SBjoern A. Zeeb 			break;
1970*cbb3ec25SBjoern A. Zeeb 
1971*cbb3ec25SBjoern A. Zeeb 		buf += mem_region->len;
1972*cbb3ec25SBjoern A. Zeeb 		buf_len -= mem_region->len;
1973*cbb3ec25SBjoern A. Zeeb 
1974*cbb3ec25SBjoern A. Zeeb 		mem_region++;
1975*cbb3ec25SBjoern A. Zeeb 	}
1976*cbb3ec25SBjoern A. Zeeb 
1977*cbb3ec25SBjoern A. Zeeb 	mutex_unlock(&dev->dump_mutex);
1978*cbb3ec25SBjoern A. Zeeb 
1979*cbb3ec25SBjoern A. Zeeb skip_memdump:
1980*cbb3ec25SBjoern A. Zeeb 	mt7996_coredump_submit(dev);
1981*cbb3ec25SBjoern A. Zeeb skip_coredump:
1982*cbb3ec25SBjoern A. Zeeb 	queue_work(dev->mt76.wq, &dev->reset_work);
1983*cbb3ec25SBjoern A. Zeeb }
1984*cbb3ec25SBjoern A. Zeeb 
1985*cbb3ec25SBjoern A. Zeeb void mt7996_reset(struct mt7996_dev *dev)
1986*cbb3ec25SBjoern A. Zeeb {
1987*cbb3ec25SBjoern A. Zeeb 	if (!dev->recovery.hw_init_done)
1988*cbb3ec25SBjoern A. Zeeb 		return;
1989*cbb3ec25SBjoern A. Zeeb 
1990*cbb3ec25SBjoern A. Zeeb 	if (dev->recovery.hw_full_reset)
1991*cbb3ec25SBjoern A. Zeeb 		return;
1992*cbb3ec25SBjoern A. Zeeb 
1993*cbb3ec25SBjoern A. Zeeb 	/* wm/wa exception: do full recovery */
1994*cbb3ec25SBjoern A. Zeeb 	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
1995*cbb3ec25SBjoern A. Zeeb 		dev->recovery.restart = true;
1996*cbb3ec25SBjoern A. Zeeb 		dev_info(dev->mt76.dev,
1997*cbb3ec25SBjoern A. Zeeb 			 "%s indicated firmware crash, attempting recovery\n",
1998*cbb3ec25SBjoern A. Zeeb 			 wiphy_name(dev->mt76.hw->wiphy));
1999*cbb3ec25SBjoern A. Zeeb 
2000*cbb3ec25SBjoern A. Zeeb 		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
2001*cbb3ec25SBjoern A. Zeeb 		queue_work(dev->mt76.wq, &dev->dump_work);
2002*cbb3ec25SBjoern A. Zeeb 		return;
2003*cbb3ec25SBjoern A. Zeeb 	}
2004*cbb3ec25SBjoern A. Zeeb 
2005*cbb3ec25SBjoern A. Zeeb 	queue_work(dev->mt76.wq, &dev->reset_work);
2006*cbb3ec25SBjoern A. Zeeb 	wake_up(&dev->reset_wait);
2007*cbb3ec25SBjoern A. Zeeb }
2008*cbb3ec25SBjoern A. Zeeb 
2009*cbb3ec25SBjoern A. Zeeb void mt7996_mac_update_stats(struct mt7996_phy *phy)
2010*cbb3ec25SBjoern A. Zeeb {
2011*cbb3ec25SBjoern A. Zeeb 	struct mt76_mib_stats *mib = &phy->mib;
2012*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
2013*cbb3ec25SBjoern A. Zeeb 	u8 band_idx = phy->mt76->band_idx;
2014*cbb3ec25SBjoern A. Zeeb 	u32 cnt;
2015*cbb3ec25SBjoern A. Zeeb 	int i;
2016*cbb3ec25SBjoern A. Zeeb 
2017*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
2018*cbb3ec25SBjoern A. Zeeb 	mib->fcs_err_cnt += cnt;
2019*cbb3ec25SBjoern A. Zeeb 
2020*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
2021*cbb3ec25SBjoern A. Zeeb 	mib->rx_fifo_full_cnt += cnt;
2022*cbb3ec25SBjoern A. Zeeb 
2023*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
2024*cbb3ec25SBjoern A. Zeeb 	mib->rx_mpdu_cnt += cnt;
2025*cbb3ec25SBjoern A. Zeeb 
2026*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
2027*cbb3ec25SBjoern A. Zeeb 	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
2028*cbb3ec25SBjoern A. Zeeb 
2029*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
2030*cbb3ec25SBjoern A. Zeeb 	mib->rx_vector_mismatch_cnt += cnt;
2031*cbb3ec25SBjoern A. Zeeb 
2032*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
2033*cbb3ec25SBjoern A. Zeeb 	mib->rx_delimiter_fail_cnt += cnt;
2034*cbb3ec25SBjoern A. Zeeb 
2035*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
2036*cbb3ec25SBjoern A. Zeeb 	mib->rx_len_mismatch_cnt += cnt;
2037*cbb3ec25SBjoern A. Zeeb 
2038*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
2039*cbb3ec25SBjoern A. Zeeb 	mib->tx_ampdu_cnt += cnt;
2040*cbb3ec25SBjoern A. Zeeb 
2041*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
2042*cbb3ec25SBjoern A. Zeeb 	mib->tx_stop_q_empty_cnt += cnt;
2043*cbb3ec25SBjoern A. Zeeb 
2044*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
2045*cbb3ec25SBjoern A. Zeeb 	mib->tx_mpdu_attempts_cnt += cnt;
2046*cbb3ec25SBjoern A. Zeeb 
2047*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
2048*cbb3ec25SBjoern A. Zeeb 	mib->tx_mpdu_success_cnt += cnt;
2049*cbb3ec25SBjoern A. Zeeb 
2050*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
2051*cbb3ec25SBjoern A. Zeeb 	mib->rx_ampdu_cnt += cnt;
2052*cbb3ec25SBjoern A. Zeeb 
2053*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
2054*cbb3ec25SBjoern A. Zeeb 	mib->rx_ampdu_bytes_cnt += cnt;
2055*cbb3ec25SBjoern A. Zeeb 
2056*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
2057*cbb3ec25SBjoern A. Zeeb 	mib->rx_ampdu_valid_subframe_cnt += cnt;
2058*cbb3ec25SBjoern A. Zeeb 
2059*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
2060*cbb3ec25SBjoern A. Zeeb 	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
2061*cbb3ec25SBjoern A. Zeeb 
2062*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
2063*cbb3ec25SBjoern A. Zeeb 	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);
2064*cbb3ec25SBjoern A. Zeeb 
2065*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
2066*cbb3ec25SBjoern A. Zeeb 	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);
2067*cbb3ec25SBjoern A. Zeeb 
2068*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
2069*cbb3ec25SBjoern A. Zeeb 	mib->rx_pfdrop_cnt += cnt;
2070*cbb3ec25SBjoern A. Zeeb 
2071*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
2072*cbb3ec25SBjoern A. Zeeb 	mib->rx_vec_queue_overflow_drop_cnt += cnt;
2073*cbb3ec25SBjoern A. Zeeb 
2074*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
2075*cbb3ec25SBjoern A. Zeeb 	mib->rx_ba_cnt += cnt;
2076*cbb3ec25SBjoern A. Zeeb 
2077*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
2078*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_ebf_ppdu_cnt += cnt;
2079*cbb3ec25SBjoern A. Zeeb 
2080*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
2081*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_ibf_ppdu_cnt += cnt;
2082*cbb3ec25SBjoern A. Zeeb 
2083*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
2084*cbb3ec25SBjoern A. Zeeb 	mib->tx_mu_bf_cnt += cnt;
2085*cbb3ec25SBjoern A. Zeeb 
2086*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
2087*cbb3ec25SBjoern A. Zeeb 	mib->tx_mu_mpdu_cnt += cnt;
2088*cbb3ec25SBjoern A. Zeeb 
2089*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
2090*cbb3ec25SBjoern A. Zeeb 	mib->tx_mu_acked_mpdu_cnt += cnt;
2091*cbb3ec25SBjoern A. Zeeb 
2092*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
2093*cbb3ec25SBjoern A. Zeeb 	mib->tx_su_acked_mpdu_cnt += cnt;
2094*cbb3ec25SBjoern A. Zeeb 
2095*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
2096*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_ht_cnt += cnt;
2097*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_all_cnt += cnt;
2098*cbb3ec25SBjoern A. Zeeb 
2099*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
2100*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_vht_cnt += cnt;
2101*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_all_cnt += cnt;
2102*cbb3ec25SBjoern A. Zeeb 
2103*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
2104*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_he_cnt += cnt;
2105*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_all_cnt += cnt;
2106*cbb3ec25SBjoern A. Zeeb 
2107*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
2108*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_eht_cnt += cnt;
2109*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_all_cnt += cnt;
2110*cbb3ec25SBjoern A. Zeeb 
2111*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
2112*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
2113*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
2114*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);
2115*cbb3ec25SBjoern A. Zeeb 
2116*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
2117*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_fb_trig_cnt += cnt;
2118*cbb3ec25SBjoern A. Zeeb 
2119*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
2120*cbb3ec25SBjoern A. Zeeb 	mib->tx_bf_fb_cpl_cnt += cnt;
2121*cbb3ec25SBjoern A. Zeeb 
2122*cbb3ec25SBjoern A. Zeeb 	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
2123*cbb3ec25SBjoern A. Zeeb 		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
2124*cbb3ec25SBjoern A. Zeeb 		mib->tx_amsdu[i] += cnt;
2125*cbb3ec25SBjoern A. Zeeb 		mib->tx_amsdu_cnt += cnt;
2126*cbb3ec25SBjoern A. Zeeb 	}
2127*cbb3ec25SBjoern A. Zeeb 
2128*cbb3ec25SBjoern A. Zeeb 	/* rts count */
2129*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
2130*cbb3ec25SBjoern A. Zeeb 	mib->rts_cnt += cnt;
2131*cbb3ec25SBjoern A. Zeeb 
2132*cbb3ec25SBjoern A. Zeeb 	/* rts retry count */
2133*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
2134*cbb3ec25SBjoern A. Zeeb 	mib->rts_retries_cnt += cnt;
2135*cbb3ec25SBjoern A. Zeeb 
2136*cbb3ec25SBjoern A. Zeeb 	/* ba miss count */
2137*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
2138*cbb3ec25SBjoern A. Zeeb 	mib->ba_miss_cnt += cnt;
2139*cbb3ec25SBjoern A. Zeeb 
2140*cbb3ec25SBjoern A. Zeeb 	/* ack fail count */
2141*cbb3ec25SBjoern A. Zeeb 	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
2142*cbb3ec25SBjoern A. Zeeb 	mib->ack_fail_cnt += cnt;
2143*cbb3ec25SBjoern A. Zeeb 
2144*cbb3ec25SBjoern A. Zeeb 	for (i = 0; i < 16; i++) {
2145*cbb3ec25SBjoern A. Zeeb 		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
2146*cbb3ec25SBjoern A. Zeeb 		phy->mt76->aggr_stats[i] += cnt;
2147*cbb3ec25SBjoern A. Zeeb 	}
2148*cbb3ec25SBjoern A. Zeeb }
2149*cbb3ec25SBjoern A. Zeeb 
2150*cbb3ec25SBjoern A. Zeeb void mt7996_mac_sta_rc_work(struct work_struct *work)
2151*cbb3ec25SBjoern A. Zeeb {
2152*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
2153*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_sta *sta;
2154*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_vif *vif;
2155*cbb3ec25SBjoern A. Zeeb 	struct mt7996_sta *msta;
2156*cbb3ec25SBjoern A. Zeeb 	u32 changed;
2157*cbb3ec25SBjoern A. Zeeb 	LIST_HEAD(list);
2158*cbb3ec25SBjoern A. Zeeb 
2159*cbb3ec25SBjoern A. Zeeb 	spin_lock_bh(&dev->mt76.sta_poll_lock);
2160*cbb3ec25SBjoern A. Zeeb 	list_splice_init(&dev->sta_rc_list, &list);
2161*cbb3ec25SBjoern A. Zeeb 
2162*cbb3ec25SBjoern A. Zeeb 	while (!list_empty(&list)) {
2163*cbb3ec25SBjoern A. Zeeb 		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
2164*cbb3ec25SBjoern A. Zeeb 		list_del_init(&msta->rc_list);
2165*cbb3ec25SBjoern A. Zeeb 		changed = msta->changed;
2166*cbb3ec25SBjoern A. Zeeb 		msta->changed = 0;
2167*cbb3ec25SBjoern A. Zeeb 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
2168*cbb3ec25SBjoern A. Zeeb 
2169*cbb3ec25SBjoern A. Zeeb 		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
2170*cbb3ec25SBjoern A. Zeeb 		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
2171*cbb3ec25SBjoern A. Zeeb 
2172*cbb3ec25SBjoern A. Zeeb 		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
2173*cbb3ec25SBjoern A. Zeeb 			       IEEE80211_RC_NSS_CHANGED |
2174*cbb3ec25SBjoern A. Zeeb 			       IEEE80211_RC_BW_CHANGED))
2175*cbb3ec25SBjoern A. Zeeb 			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);
2176*cbb3ec25SBjoern A. Zeeb 
2177*cbb3ec25SBjoern A. Zeeb 		/* TODO: smps change */
2178*cbb3ec25SBjoern A. Zeeb 
2179*cbb3ec25SBjoern A. Zeeb 		spin_lock_bh(&dev->mt76.sta_poll_lock);
2180*cbb3ec25SBjoern A. Zeeb 	}
2181*cbb3ec25SBjoern A. Zeeb 
2182*cbb3ec25SBjoern A. Zeeb 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
2183*cbb3ec25SBjoern A. Zeeb }
2184*cbb3ec25SBjoern A. Zeeb 
2185*cbb3ec25SBjoern A. Zeeb void mt7996_mac_work(struct work_struct *work)
2186*cbb3ec25SBjoern A. Zeeb {
2187*cbb3ec25SBjoern A. Zeeb 	struct mt7996_phy *phy;
2188*cbb3ec25SBjoern A. Zeeb 	struct mt76_phy *mphy;
2189*cbb3ec25SBjoern A. Zeeb 
2190*cbb3ec25SBjoern A. Zeeb 	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
2191*cbb3ec25SBjoern A. Zeeb 					       mac_work.work);
2192*cbb3ec25SBjoern A. Zeeb 	phy = mphy->priv;
2193*cbb3ec25SBjoern A. Zeeb 
2194*cbb3ec25SBjoern A. Zeeb 	mutex_lock(&mphy->dev->mutex);
2195*cbb3ec25SBjoern A. Zeeb 
2196*cbb3ec25SBjoern A. Zeeb 	mt76_update_survey(mphy);
2197*cbb3ec25SBjoern A. Zeeb 	if (++mphy->mac_work_count == 5) {
2198*cbb3ec25SBjoern A. Zeeb 		mphy->mac_work_count = 0;
2199*cbb3ec25SBjoern A. Zeeb 
2200*cbb3ec25SBjoern A. Zeeb 		mt7996_mac_update_stats(phy);
2201*cbb3ec25SBjoern A. Zeeb 	}
2202*cbb3ec25SBjoern A. Zeeb 
2203*cbb3ec25SBjoern A. Zeeb 	mutex_unlock(&mphy->dev->mutex);
2204*cbb3ec25SBjoern A. Zeeb 
2205*cbb3ec25SBjoern A. Zeeb 	mt76_tx_status_check(mphy->dev, false);
2206*cbb3ec25SBjoern A. Zeeb 
2207*cbb3ec25SBjoern A. Zeeb 	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
2208*cbb3ec25SBjoern A. Zeeb 				     MT7996_WATCHDOG_TIME);
2209*cbb3ec25SBjoern A. Zeeb }
2210*cbb3ec25SBjoern A. Zeeb 
2211*cbb3ec25SBjoern A. Zeeb static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
2212*cbb3ec25SBjoern A. Zeeb {
2213*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
2214*cbb3ec25SBjoern A. Zeeb 
2215*cbb3ec25SBjoern A. Zeeb 	if (phy->rdd_state & BIT(0))
2216*cbb3ec25SBjoern A. Zeeb 		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
2217*cbb3ec25SBjoern A. Zeeb 				   MT_RX_SEL0, 0);
2218*cbb3ec25SBjoern A. Zeeb 	if (phy->rdd_state & BIT(1))
2219*cbb3ec25SBjoern A. Zeeb 		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
2220*cbb3ec25SBjoern A. Zeeb 				   MT_RX_SEL0, 0);
2221*cbb3ec25SBjoern A. Zeeb }
2222*cbb3ec25SBjoern A. Zeeb 
2223*cbb3ec25SBjoern A. Zeeb static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
2224*cbb3ec25SBjoern A. Zeeb {
2225*cbb3ec25SBjoern A. Zeeb 	int err, region;
2226*cbb3ec25SBjoern A. Zeeb 
2227*cbb3ec25SBjoern A. Zeeb 	switch (dev->mt76.region) {
2228*cbb3ec25SBjoern A. Zeeb 	case NL80211_DFS_ETSI:
2229*cbb3ec25SBjoern A. Zeeb 		region = 0;
2230*cbb3ec25SBjoern A. Zeeb 		break;
2231*cbb3ec25SBjoern A. Zeeb 	case NL80211_DFS_JP:
2232*cbb3ec25SBjoern A. Zeeb 		region = 2;
2233*cbb3ec25SBjoern A. Zeeb 		break;
2234*cbb3ec25SBjoern A. Zeeb 	case NL80211_DFS_FCC:
2235*cbb3ec25SBjoern A. Zeeb 	default:
2236*cbb3ec25SBjoern A. Zeeb 		region = 1;
2237*cbb3ec25SBjoern A. Zeeb 		break;
2238*cbb3ec25SBjoern A. Zeeb 	}
2239*cbb3ec25SBjoern A. Zeeb 
2240*cbb3ec25SBjoern A. Zeeb 	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
2241*cbb3ec25SBjoern A. Zeeb 				 MT_RX_SEL0, region);
2242*cbb3ec25SBjoern A. Zeeb 	if (err < 0)
2243*cbb3ec25SBjoern A. Zeeb 		return err;
2244*cbb3ec25SBjoern A. Zeeb 
2245*cbb3ec25SBjoern A. Zeeb 	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
2246*cbb3ec25SBjoern A. Zeeb 				 MT_RX_SEL0, 1);
2247*cbb3ec25SBjoern A. Zeeb }
2248*cbb3ec25SBjoern A. Zeeb 
2249*cbb3ec25SBjoern A. Zeeb static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
2250*cbb3ec25SBjoern A. Zeeb {
2251*cbb3ec25SBjoern A. Zeeb 	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
2252*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
2253*cbb3ec25SBjoern A. Zeeb 	u8 band_idx = phy->mt76->band_idx;
2254*cbb3ec25SBjoern A. Zeeb 	int err;
2255*cbb3ec25SBjoern A. Zeeb 
2256*cbb3ec25SBjoern A. Zeeb 	/* start CAC */
2257*cbb3ec25SBjoern A. Zeeb 	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
2258*cbb3ec25SBjoern A. Zeeb 				 MT_RX_SEL0, 0);
2259*cbb3ec25SBjoern A. Zeeb 	if (err < 0)
2260*cbb3ec25SBjoern A. Zeeb 		return err;
2261*cbb3ec25SBjoern A. Zeeb 
2262*cbb3ec25SBjoern A. Zeeb 	err = mt7996_dfs_start_rdd(dev, band_idx);
2263*cbb3ec25SBjoern A. Zeeb 	if (err < 0)
2264*cbb3ec25SBjoern A. Zeeb 		return err;
2265*cbb3ec25SBjoern A. Zeeb 
2266*cbb3ec25SBjoern A. Zeeb 	phy->rdd_state |= BIT(band_idx);
2267*cbb3ec25SBjoern A. Zeeb 
2268*cbb3ec25SBjoern A. Zeeb 	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
2269*cbb3ec25SBjoern A. Zeeb 	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
2270*cbb3ec25SBjoern A. Zeeb 		err = mt7996_dfs_start_rdd(dev, 1);
2271*cbb3ec25SBjoern A. Zeeb 		if (err < 0)
2272*cbb3ec25SBjoern A. Zeeb 			return err;
2273*cbb3ec25SBjoern A. Zeeb 
2274*cbb3ec25SBjoern A. Zeeb 		phy->rdd_state |= BIT(1);
2275*cbb3ec25SBjoern A. Zeeb 	}
2276*cbb3ec25SBjoern A. Zeeb 
2277*cbb3ec25SBjoern A. Zeeb 	return 0;
2278*cbb3ec25SBjoern A. Zeeb }
2279*cbb3ec25SBjoern A. Zeeb 
2280*cbb3ec25SBjoern A. Zeeb static int
2281*cbb3ec25SBjoern A. Zeeb mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
2282*cbb3ec25SBjoern A. Zeeb {
2283*cbb3ec25SBjoern A. Zeeb 	const struct mt7996_dfs_radar_spec *radar_specs;
2284*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
2285*cbb3ec25SBjoern A. Zeeb 	int err, i;
2286*cbb3ec25SBjoern A. Zeeb 
2287*cbb3ec25SBjoern A. Zeeb 	switch (dev->mt76.region) {
2288*cbb3ec25SBjoern A. Zeeb 	case NL80211_DFS_FCC:
2289*cbb3ec25SBjoern A. Zeeb 		radar_specs = &fcc_radar_specs;
2290*cbb3ec25SBjoern A. Zeeb 		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
2291*cbb3ec25SBjoern A. Zeeb 		if (err < 0)
2292*cbb3ec25SBjoern A. Zeeb 			return err;
2293*cbb3ec25SBjoern A. Zeeb 		break;
2294*cbb3ec25SBjoern A. Zeeb 	case NL80211_DFS_ETSI:
2295*cbb3ec25SBjoern A. Zeeb 		radar_specs = &etsi_radar_specs;
2296*cbb3ec25SBjoern A. Zeeb 		break;
2297*cbb3ec25SBjoern A. Zeeb 	case NL80211_DFS_JP:
2298*cbb3ec25SBjoern A. Zeeb 		radar_specs = &jp_radar_specs;
2299*cbb3ec25SBjoern A. Zeeb 		break;
2300*cbb3ec25SBjoern A. Zeeb 	default:
2301*cbb3ec25SBjoern A. Zeeb 		return -EINVAL;
2302*cbb3ec25SBjoern A. Zeeb 	}
2303*cbb3ec25SBjoern A. Zeeb 
2304*cbb3ec25SBjoern A. Zeeb 	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
2305*cbb3ec25SBjoern A. Zeeb 		err = mt7996_mcu_set_radar_th(dev, i,
2306*cbb3ec25SBjoern A. Zeeb 					      &radar_specs->radar_pattern[i]);
2307*cbb3ec25SBjoern A. Zeeb 		if (err < 0)
2308*cbb3ec25SBjoern A. Zeeb 			return err;
2309*cbb3ec25SBjoern A. Zeeb 	}
2310*cbb3ec25SBjoern A. Zeeb 
2311*cbb3ec25SBjoern A. Zeeb 	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
2312*cbb3ec25SBjoern A. Zeeb }
2313*cbb3ec25SBjoern A. Zeeb 
2314*cbb3ec25SBjoern A. Zeeb int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
2315*cbb3ec25SBjoern A. Zeeb {
2316*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = phy->dev;
2317*cbb3ec25SBjoern A. Zeeb 	enum mt76_dfs_state dfs_state, prev_state;
2318*cbb3ec25SBjoern A. Zeeb 	int err;
2319*cbb3ec25SBjoern A. Zeeb 
2320*cbb3ec25SBjoern A. Zeeb 	prev_state = phy->mt76->dfs_state;
2321*cbb3ec25SBjoern A. Zeeb 	dfs_state = mt76_phy_dfs_state(phy->mt76);
2322*cbb3ec25SBjoern A. Zeeb 
2323*cbb3ec25SBjoern A. Zeeb 	if (prev_state == dfs_state)
2324*cbb3ec25SBjoern A. Zeeb 		return 0;
2325*cbb3ec25SBjoern A. Zeeb 
2326*cbb3ec25SBjoern A. Zeeb 	if (prev_state == MT_DFS_STATE_UNKNOWN)
2327*cbb3ec25SBjoern A. Zeeb 		mt7996_dfs_stop_radar_detector(phy);
2328*cbb3ec25SBjoern A. Zeeb 
2329*cbb3ec25SBjoern A. Zeeb 	if (dfs_state == MT_DFS_STATE_DISABLED)
2330*cbb3ec25SBjoern A. Zeeb 		goto stop;
2331*cbb3ec25SBjoern A. Zeeb 
2332*cbb3ec25SBjoern A. Zeeb 	if (prev_state <= MT_DFS_STATE_DISABLED) {
2333*cbb3ec25SBjoern A. Zeeb 		err = mt7996_dfs_init_radar_specs(phy);
2334*cbb3ec25SBjoern A. Zeeb 		if (err < 0)
2335*cbb3ec25SBjoern A. Zeeb 			return err;
2336*cbb3ec25SBjoern A. Zeeb 
2337*cbb3ec25SBjoern A. Zeeb 		err = mt7996_dfs_start_radar_detector(phy);
2338*cbb3ec25SBjoern A. Zeeb 		if (err < 0)
2339*cbb3ec25SBjoern A. Zeeb 			return err;
2340*cbb3ec25SBjoern A. Zeeb 
2341*cbb3ec25SBjoern A. Zeeb 		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
2342*cbb3ec25SBjoern A. Zeeb 	}
2343*cbb3ec25SBjoern A. Zeeb 
2344*cbb3ec25SBjoern A. Zeeb 	if (dfs_state == MT_DFS_STATE_CAC)
2345*cbb3ec25SBjoern A. Zeeb 		return 0;
2346*cbb3ec25SBjoern A. Zeeb 
2347*cbb3ec25SBjoern A. Zeeb 	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
2348*cbb3ec25SBjoern A. Zeeb 				 phy->mt76->band_idx, MT_RX_SEL0, 0);
2349*cbb3ec25SBjoern A. Zeeb 	if (err < 0) {
2350*cbb3ec25SBjoern A. Zeeb 		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
2351*cbb3ec25SBjoern A. Zeeb 		return err;
2352*cbb3ec25SBjoern A. Zeeb 	}
2353*cbb3ec25SBjoern A. Zeeb 
2354*cbb3ec25SBjoern A. Zeeb 	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
2355*cbb3ec25SBjoern A. Zeeb 	return 0;
2356*cbb3ec25SBjoern A. Zeeb 
2357*cbb3ec25SBjoern A. Zeeb stop:
2358*cbb3ec25SBjoern A. Zeeb 	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
2359*cbb3ec25SBjoern A. Zeeb 				 phy->mt76->band_idx, MT_RX_SEL0, 0);
2360*cbb3ec25SBjoern A. Zeeb 	if (err < 0)
2361*cbb3ec25SBjoern A. Zeeb 		return err;
2362*cbb3ec25SBjoern A. Zeeb 
2363*cbb3ec25SBjoern A. Zeeb 	mt7996_dfs_stop_radar_detector(phy);
2364*cbb3ec25SBjoern A. Zeeb 	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
2365*cbb3ec25SBjoern A. Zeeb 
2366*cbb3ec25SBjoern A. Zeeb 	return 0;
2367*cbb3ec25SBjoern A. Zeeb }
2368*cbb3ec25SBjoern A. Zeeb 
2369*cbb3ec25SBjoern A. Zeeb static int
2370*cbb3ec25SBjoern A. Zeeb mt7996_mac_twt_duration_align(int duration)
2371*cbb3ec25SBjoern A. Zeeb {
2372*cbb3ec25SBjoern A. Zeeb 	return duration << 8;
2373*cbb3ec25SBjoern A. Zeeb }
2374*cbb3ec25SBjoern A. Zeeb 
2375*cbb3ec25SBjoern A. Zeeb static u64
2376*cbb3ec25SBjoern A. Zeeb mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
2377*cbb3ec25SBjoern A. Zeeb 			      struct mt7996_twt_flow *flow)
2378*cbb3ec25SBjoern A. Zeeb {
2379*cbb3ec25SBjoern A. Zeeb 	struct mt7996_twt_flow *iter, *iter_next;
2380*cbb3ec25SBjoern A. Zeeb 	u32 duration = flow->duration << 8;
2381*cbb3ec25SBjoern A. Zeeb 	u64 start_tsf;
2382*cbb3ec25SBjoern A. Zeeb 
2383*cbb3ec25SBjoern A. Zeeb 	iter = list_first_entry_or_null(&dev->twt_list,
2384*cbb3ec25SBjoern A. Zeeb 					struct mt7996_twt_flow, list);
2385*cbb3ec25SBjoern A. Zeeb 	if (!iter || !iter->sched || iter->start_tsf > duration) {
2386*cbb3ec25SBjoern A. Zeeb 		/* add flow as first entry in the list */
2387*cbb3ec25SBjoern A. Zeeb 		list_add(&flow->list, &dev->twt_list);
2388*cbb3ec25SBjoern A. Zeeb 		return 0;
2389*cbb3ec25SBjoern A. Zeeb 	}
2390*cbb3ec25SBjoern A. Zeeb 
2391*cbb3ec25SBjoern A. Zeeb 	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
2392*cbb3ec25SBjoern A. Zeeb 		start_tsf = iter->start_tsf +
2393*cbb3ec25SBjoern A. Zeeb 			    mt7996_mac_twt_duration_align(iter->duration);
2394*cbb3ec25SBjoern A. Zeeb 		if (list_is_last(&iter->list, &dev->twt_list))
2395*cbb3ec25SBjoern A. Zeeb 			break;
2396*cbb3ec25SBjoern A. Zeeb 
2397*cbb3ec25SBjoern A. Zeeb 		if (!iter_next->sched ||
2398*cbb3ec25SBjoern A. Zeeb 		    iter_next->start_tsf > start_tsf + duration) {
2399*cbb3ec25SBjoern A. Zeeb 			list_add(&flow->list, &iter->list);
2400*cbb3ec25SBjoern A. Zeeb 			goto out;
2401*cbb3ec25SBjoern A. Zeeb 		}
2402*cbb3ec25SBjoern A. Zeeb 	}
2403*cbb3ec25SBjoern A. Zeeb 
2404*cbb3ec25SBjoern A. Zeeb 	/* add flow as last entry in the list */
2405*cbb3ec25SBjoern A. Zeeb 	list_add_tail(&flow->list, &dev->twt_list);
2406*cbb3ec25SBjoern A. Zeeb out:
2407*cbb3ec25SBjoern A. Zeeb 	return start_tsf;
2408*cbb3ec25SBjoern A. Zeeb }
2409*cbb3ec25SBjoern A. Zeeb 
2410*cbb3ec25SBjoern A. Zeeb static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
2411*cbb3ec25SBjoern A. Zeeb {
2412*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_twt_params *twt_agrt;
2413*cbb3ec25SBjoern A. Zeeb 	u64 interval, duration;
2414*cbb3ec25SBjoern A. Zeeb 	u16 mantissa;
2415*cbb3ec25SBjoern A. Zeeb 	u8 exp;
2416*cbb3ec25SBjoern A. Zeeb 
2417*cbb3ec25SBjoern A. Zeeb 	/* only individual agreement supported */
2418*cbb3ec25SBjoern A. Zeeb 	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
2419*cbb3ec25SBjoern A. Zeeb 		return -EOPNOTSUPP;
2420*cbb3ec25SBjoern A. Zeeb 
2421*cbb3ec25SBjoern A. Zeeb 	/* only 256us unit supported */
2422*cbb3ec25SBjoern A. Zeeb 	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
2423*cbb3ec25SBjoern A. Zeeb 		return -EOPNOTSUPP;
2424*cbb3ec25SBjoern A. Zeeb 
2425*cbb3ec25SBjoern A. Zeeb 	twt_agrt = (struct ieee80211_twt_params *)twt->params;
2426*cbb3ec25SBjoern A. Zeeb 
2427*cbb3ec25SBjoern A. Zeeb 	/* explicit agreement not supported */
2428*cbb3ec25SBjoern A. Zeeb 	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
2429*cbb3ec25SBjoern A. Zeeb 		return -EOPNOTSUPP;
2430*cbb3ec25SBjoern A. Zeeb 
2431*cbb3ec25SBjoern A. Zeeb 	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
2432*cbb3ec25SBjoern A. Zeeb 			le16_to_cpu(twt_agrt->req_type));
2433*cbb3ec25SBjoern A. Zeeb 	mantissa = le16_to_cpu(twt_agrt->mantissa);
2434*cbb3ec25SBjoern A. Zeeb 	duration = twt_agrt->min_twt_dur << 8;
2435*cbb3ec25SBjoern A. Zeeb 
2436*cbb3ec25SBjoern A. Zeeb 	interval = (u64)mantissa << exp;
2437*cbb3ec25SBjoern A. Zeeb 	if (interval < duration)
2438*cbb3ec25SBjoern A. Zeeb 		return -EOPNOTSUPP;
2439*cbb3ec25SBjoern A. Zeeb 
2440*cbb3ec25SBjoern A. Zeeb 	return 0;
2441*cbb3ec25SBjoern A. Zeeb }
2442*cbb3ec25SBjoern A. Zeeb 
2443*cbb3ec25SBjoern A. Zeeb void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
2444*cbb3ec25SBjoern A. Zeeb 			      struct ieee80211_sta *sta,
2445*cbb3ec25SBjoern A. Zeeb 			      struct ieee80211_twt_setup *twt)
2446*cbb3ec25SBjoern A. Zeeb {
2447*cbb3ec25SBjoern A. Zeeb 	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
2448*cbb3ec25SBjoern A. Zeeb 	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
2449*cbb3ec25SBjoern A. Zeeb 	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
2450*cbb3ec25SBjoern A. Zeeb 	u16 req_type = le16_to_cpu(twt_agrt->req_type);
2451*cbb3ec25SBjoern A. Zeeb 	enum ieee80211_twt_setup_cmd sta_setup_cmd;
2452*cbb3ec25SBjoern A. Zeeb 	struct mt7996_dev *dev = mt7996_hw_dev(hw);
2453*cbb3ec25SBjoern A. Zeeb 	struct mt7996_twt_flow *flow;
2454*cbb3ec25SBjoern A. Zeeb 	int flowid, table_id;
2455*cbb3ec25SBjoern A. Zeeb 	u8 exp;
2456*cbb3ec25SBjoern A. Zeeb 
2457*cbb3ec25SBjoern A. Zeeb 	if (mt7996_mac_check_twt_req(twt))
2458*cbb3ec25SBjoern A. Zeeb 		goto out;
2459*cbb3ec25SBjoern A. Zeeb 
2460*cbb3ec25SBjoern A. Zeeb 	mutex_lock(&dev->mt76.mutex);
2461*cbb3ec25SBjoern A. Zeeb 
2462*cbb3ec25SBjoern A. Zeeb 	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
2463*cbb3ec25SBjoern A. Zeeb 		goto unlock;
2464*cbb3ec25SBjoern A. Zeeb 
2465*cbb3ec25SBjoern A. Zeeb 	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
2466*cbb3ec25SBjoern A. Zeeb 		goto unlock;
2467*cbb3ec25SBjoern A. Zeeb 
2468*cbb3ec25SBjoern A. Zeeb 	flowid = ffs(~msta->twt.flowid_mask) - 1;
2469*cbb3ec25SBjoern A. Zeeb 	le16p_replace_bits(&twt_agrt->req_type, flowid,
2470*cbb3ec25SBjoern A. Zeeb 			   IEEE80211_TWT_REQTYPE_FLOWID);
2471*cbb3ec25SBjoern A. Zeeb 
2472*cbb3ec25SBjoern A. Zeeb 	table_id = ffs(~dev->twt.table_mask) - 1;
2473*cbb3ec25SBjoern A. Zeeb 	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
2474*cbb3ec25SBjoern A. Zeeb 	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
2475*cbb3ec25SBjoern A. Zeeb 
2476*cbb3ec25SBjoern A. Zeeb 	flow = &msta->twt.flow[flowid];
2477*cbb3ec25SBjoern A. Zeeb 	memset(flow, 0, sizeof(*flow));
2478*cbb3ec25SBjoern A. Zeeb 	INIT_LIST_HEAD(&flow->list);
2479*cbb3ec25SBjoern A. Zeeb 	flow->wcid = msta->wcid.idx;
2480*cbb3ec25SBjoern A. Zeeb 	flow->table_id = table_id;
2481*cbb3ec25SBjoern A. Zeeb 	flow->id = flowid;
2482*cbb3ec25SBjoern A. Zeeb 	flow->duration = twt_agrt->min_twt_dur;
2483*cbb3ec25SBjoern A. Zeeb 	flow->mantissa = twt_agrt->mantissa;
2484*cbb3ec25SBjoern A. Zeeb 	flow->exp = exp;
2485*cbb3ec25SBjoern A. Zeeb 	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
2486*cbb3ec25SBjoern A. Zeeb 	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
2487*cbb3ec25SBjoern A. Zeeb 	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
2488*cbb3ec25SBjoern A. Zeeb 
2489*cbb3ec25SBjoern A. Zeeb 	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
2490*cbb3ec25SBjoern A. Zeeb 	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
2491*cbb3ec25SBjoern A. Zeeb 		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
2492*cbb3ec25SBjoern A. Zeeb 		u64 flow_tsf, curr_tsf;
2493*cbb3ec25SBjoern A. Zeeb 		u32 rem;
2494*cbb3ec25SBjoern A. Zeeb 
2495*cbb3ec25SBjoern A. Zeeb 		flow->sched = true;
2496*cbb3ec25SBjoern A. Zeeb 		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
2497*cbb3ec25SBjoern A. Zeeb 		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
2498*cbb3ec25SBjoern A. Zeeb 		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
2499*cbb3ec25SBjoern A. Zeeb 		flow_tsf = curr_tsf + interval - rem;
2500*cbb3ec25SBjoern A. Zeeb 		twt_agrt->twt = cpu_to_le64(flow_tsf);
2501*cbb3ec25SBjoern A. Zeeb 	} else {
2502*cbb3ec25SBjoern A. Zeeb 		list_add_tail(&flow->list, &dev->twt_list);
2503*cbb3ec25SBjoern A. Zeeb 	}
2504*cbb3ec25SBjoern A. Zeeb 	flow->tsf = le64_to_cpu(twt_agrt->twt);
2505*cbb3ec25SBjoern A. Zeeb 
2506*cbb3ec25SBjoern A. Zeeb 	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
2507*cbb3ec25SBjoern A. Zeeb 		goto unlock;
2508*cbb3ec25SBjoern A. Zeeb 
2509*cbb3ec25SBjoern A. Zeeb 	setup_cmd = TWT_SETUP_CMD_ACCEPT;
2510*cbb3ec25SBjoern A. Zeeb 	dev->twt.table_mask |= BIT(table_id);
2511*cbb3ec25SBjoern A. Zeeb 	msta->twt.flowid_mask |= BIT(flowid);
2512*cbb3ec25SBjoern A. Zeeb 	dev->twt.n_agrt++;
2513*cbb3ec25SBjoern A. Zeeb 
2514*cbb3ec25SBjoern A. Zeeb unlock:
2515*cbb3ec25SBjoern A. Zeeb 	mutex_unlock(&dev->mt76.mutex);
2516*cbb3ec25SBjoern A. Zeeb out:
2517*cbb3ec25SBjoern A. Zeeb 	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
2518*cbb3ec25SBjoern A. Zeeb 			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
2519*cbb3ec25SBjoern A. Zeeb 	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
2520*cbb3ec25SBjoern A. Zeeb 		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
2521*cbb3ec25SBjoern A. Zeeb }
2522*cbb3ec25SBjoern A. Zeeb 
2523*cbb3ec25SBjoern A. Zeeb void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
2524*cbb3ec25SBjoern A. Zeeb 				  struct mt7996_sta *msta,
2525*cbb3ec25SBjoern A. Zeeb 				  u8 flowid)
2526*cbb3ec25SBjoern A. Zeeb {
2527*cbb3ec25SBjoern A. Zeeb 	struct mt7996_twt_flow *flow;
2528*cbb3ec25SBjoern A. Zeeb 
2529*cbb3ec25SBjoern A. Zeeb 	lockdep_assert_held(&dev->mt76.mutex);
2530*cbb3ec25SBjoern A. Zeeb 
2531*cbb3ec25SBjoern A. Zeeb 	if (flowid >= ARRAY_SIZE(msta->twt.flow))
2532*cbb3ec25SBjoern A. Zeeb 		return;
2533*cbb3ec25SBjoern A. Zeeb 
2534*cbb3ec25SBjoern A. Zeeb 	if (!(msta->twt.flowid_mask & BIT(flowid)))
2535*cbb3ec25SBjoern A. Zeeb 		return;
2536*cbb3ec25SBjoern A. Zeeb 
2537*cbb3ec25SBjoern A. Zeeb 	flow = &msta->twt.flow[flowid];
2538*cbb3ec25SBjoern A. Zeeb 	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
2539*cbb3ec25SBjoern A. Zeeb 				       MCU_TWT_AGRT_DELETE))
2540*cbb3ec25SBjoern A. Zeeb 		return;
2541*cbb3ec25SBjoern A. Zeeb 
2542*cbb3ec25SBjoern A. Zeeb 	list_del_init(&flow->list);
2543*cbb3ec25SBjoern A. Zeeb 	msta->twt.flowid_mask &= ~BIT(flowid);
2544*cbb3ec25SBjoern A. Zeeb 	dev->twt.table_mask &= ~BIT(flow->table_id);
2545*cbb3ec25SBjoern A. Zeeb 	dev->twt.n_agrt--;
2546*cbb3ec25SBjoern A. Zeeb }
2547