xref: /linux/drivers/net/wireless/mediatek/mt76/mt7915/mac.c (revision 02680c23d7b3febe45ea3d4f9818c2b2dc89020a)
1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/timekeeping.h>
6 #include "mt7915.h"
7 #include "../dma.h"
8 #include "mac.h"
9 
/* Convert a packed RCPI field from an RX vector word to a signal level
 * ((rcpi - 220) / 2 — presumably dBm, matching the chain_signal usage
 * below; TODO confirm against the RXV spec).
 */
#define to_rssi(field, rxv)	((FIELD_GET(field, rxv) - 220) / 2)

/* Shorthands for building radiotap HE fields:
 * HE_BITS() expands a radiotap HE "known"/value constant to le16,
 * HE_PREP() extracts an MT_CRXV_HE_* field from an RXV dword and
 * re-encodes it at the given radiotap HE field position.
 */
#define HE_BITS(f)		cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
#define HE_PREP(f, m, v)	le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
						 IEEE80211_RADIOTAP_HE_##f)
15 
/* DFS radar detection parameters for the ETSI regulatory domain.
 * Field layout is defined by struct mt7915_dfs_radar_spec (mt7915.h);
 * sparse pattern indices (5..12) match the hardware radar pattern slots.
 */
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { },  54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { },  54 },
	},
};
29 
/* DFS radar detection parameters for the FCC regulatory domain.
 * Same field layout as etsi_radar_specs (struct mt7915_dfs_radar_spec).
 */
static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};
40 
/* DFS radar detection parameters for the Japanese regulatory domain.
 * Patterns 0-4 mirror the FCC set; 13-15 are JP-specific additions.
 */
static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};
54 
55 static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
56 					    u16 idx, bool unicast)
57 {
58 	struct mt7915_sta *sta;
59 	struct mt76_wcid *wcid;
60 
61 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
62 		return NULL;
63 
64 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
65 	if (unicast || !wcid)
66 		return wcid;
67 
68 	if (!wcid->sta)
69 		return NULL;
70 
71 	sta = container_of(wcid, struct mt7915_sta, wcid);
72 	if (!sta->vif)
73 		return NULL;
74 
75 	return &sta->vif->sta.wcid;
76 }
77 
/* mt76 sta_ps callback: intentionally a no-op on mt7915 (power-save
 * transitions are presumably handled by firmware/hardware — the driver
 * takes no action here).
 */
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
81 
82 bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
83 {
84 	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
85 		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
86 
87 	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
88 			 0, 5000);
89 }
90 
91 static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid)
92 {
93 	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
94 		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
95 
96 	return MT_WTBL_LMAC_OFFS(wcid, 0);
97 }
98 
/* TODO: use txfree airtime info to avoid runtime accessing in the long run */
/* Poll per-station WTBL airtime counters and report the per-AC deltas
 * to mac80211.  Stations are queued on dev->sta_poll_list by the RX path.
 */
static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
	/* one representative TID per AC, used for airtime reporting */
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7915_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	/* take ownership of the pending list under the lock */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr;
		u16 idx;

		/* pop entries one at a time; the lock only guards list ops */
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7915_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		/* airtime counters start at dword 20 of the WTBL entry */
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			/* report only the delta since the previous poll */
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* request a hardware counter clear before overflow */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7915_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			/* counters are indexed by hardware queue, not AC */
			u8 q = mt7915_lmac_mapping(dev, i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}
181 
182 static void
183 mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
184 				 struct ieee80211_radiotap_he *he,
185 				 __le32 *rxv)
186 {
187 	u32 ru_h, ru_l;
188 	u8 ru, offs = 0;
189 
190 	ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
191 	ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
192 	ru = (u8)(ru_l | ru_h << 4);
193 
194 	status->bw = RATE_INFO_BW_HE_RU;
195 
196 	switch (ru) {
197 	case 0 ... 36:
198 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
199 		offs = ru;
200 		break;
201 	case 37 ... 52:
202 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
203 		offs = ru - 37;
204 		break;
205 	case 53 ... 60:
206 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
207 		offs = ru - 53;
208 		break;
209 	case 61 ... 64:
210 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
211 		offs = ru - 61;
212 		break;
213 	case 65 ... 66:
214 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
215 		offs = ru - 65;
216 		break;
217 	case 67:
218 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
219 		break;
220 	case 68:
221 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
222 		break;
223 	}
224 
225 	he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
226 	he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
227 		     le16_encode_bits(offs,
228 				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
229 }
230 
/* Prepend a radiotap HE header to @skb and fill it from the C-RXV words.
 * Common fields are filled first; format-specific fields (SU/EXT SU/MU/TB)
 * are added in the switch on @phy.  @rxv points at the start of the RX
 * vector (group 3) as set up by mt7915_mac_fill_rx().
 */
static void
mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
			      struct mt76_rx_status *status,
			      __le32 *rxv, u32 phy)
{
	/* TODO: struct ieee80211_radiotap_he_mu */
	/* template with the "known" bits valid for every HE format */
	static const struct ieee80211_radiotap_he known = {
		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
			 HE_BITS(DATA1_STBC_KNOWN) |
			 HE_BITS(DATA1_CODING_KNOWN) |
			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
			 HE_BITS(DATA1_DOPPLER_KNOWN) |
			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
		.data2 = HE_BITS(DATA2_GI_KNOWN) |
			 HE_BITS(DATA2_TXBF_KNOWN) |
			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
			 HE_BITS(DATA2_TXOP_KNOWN),
	};
	struct ieee80211_radiotap_he *he = NULL;
	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;

	he = skb_push(skb, sizeof(known));
	memcpy(he, &known, sizeof(known));

	/* fields common to all HE PPDU formats */
	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
		    le16_encode_bits(ltf_size,
				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);

	switch (phy) {
	case MT_PHY_TYPE_HE_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
		break;
	case MT_PHY_TYPE_HE_EXT_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_MU:
		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);

		/* MU frames additionally carry an RU allocation */
		mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	case MT_PHY_TYPE_HE_TB:
		/* trigger-based PPDU: four spatial reuse fields */
		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);

		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);

		mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	default:
		break;
	}
}
309 
310 int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
311 {
312 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
313 	struct mt76_phy *mphy = &dev->mt76.phy;
314 	struct mt7915_phy *phy = &dev->phy;
315 	struct ieee80211_supported_band *sband;
316 	struct ieee80211_hdr *hdr;
317 	__le32 *rxd = (__le32 *)skb->data;
318 	__le32 *rxv = NULL;
319 	u32 mode = 0;
320 	u32 rxd0 = le32_to_cpu(rxd[0]);
321 	u32 rxd1 = le32_to_cpu(rxd[1]);
322 	u32 rxd2 = le32_to_cpu(rxd[2]);
323 	u32 rxd3 = le32_to_cpu(rxd[3]);
324 	u32 rxd4 = le32_to_cpu(rxd[4]);
325 	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
326 	bool unicast, insert_ccmp_hdr = false;
327 	u8 remove_pad, amsdu_info;
328 	bool hdr_trans;
329 	u16 seq_ctrl = 0;
330 	u8 qos_ctl = 0;
331 	__le16 fc = 0;
332 	int i, idx;
333 
334 	memset(status, 0, sizeof(*status));
335 
336 	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
337 		mphy = dev->mt76.phy2;
338 		if (!mphy)
339 			return -EINVAL;
340 
341 		phy = mphy->priv;
342 		status->ext_phy = true;
343 	}
344 
345 	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
346 		return -EINVAL;
347 
348 	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
349 		return -EINVAL;
350 
351 	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
352 	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
353 	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
354 	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
355 
356 	if (status->wcid) {
357 		struct mt7915_sta *msta;
358 
359 		msta = container_of(status->wcid, struct mt7915_sta, wcid);
360 		spin_lock_bh(&dev->sta_poll_lock);
361 		if (list_empty(&msta->poll_list))
362 			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
363 		spin_unlock_bh(&dev->sta_poll_lock);
364 	}
365 
366 	status->freq = mphy->chandef.chan->center_freq;
367 	status->band = mphy->chandef.chan->band;
368 	if (status->band == NL80211_BAND_5GHZ)
369 		sband = &mphy->sband_5g.sband;
370 	else
371 		sband = &mphy->sband_2g.sband;
372 
373 	if (!sband->channels)
374 		return -EINVAL;
375 
376 	if ((rxd0 & csum_mask) == csum_mask)
377 		skb->ip_summed = CHECKSUM_UNNECESSARY;
378 
379 	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
380 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
381 
382 	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
383 		status->flag |= RX_FLAG_MMIC_ERROR;
384 
385 	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
386 	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
387 		status->flag |= RX_FLAG_DECRYPTED;
388 		status->flag |= RX_FLAG_IV_STRIPPED;
389 		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
390 	}
391 
392 	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
393 
394 	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
395 		return -EINVAL;
396 
397 	rxd += 6;
398 	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
399 		u32 v0 = le32_to_cpu(rxd[0]);
400 		u32 v2 = le32_to_cpu(rxd[2]);
401 
402 		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
403 		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
404 		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
405 
406 		rxd += 4;
407 		if ((u8 *)rxd - skb->data >= skb->len)
408 			return -EINVAL;
409 	}
410 
411 	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
412 		u8 *data = (u8 *)rxd;
413 
414 		if (status->flag & RX_FLAG_DECRYPTED) {
415 			status->iv[0] = data[5];
416 			status->iv[1] = data[4];
417 			status->iv[2] = data[3];
418 			status->iv[3] = data[2];
419 			status->iv[4] = data[1];
420 			status->iv[5] = data[0];
421 
422 			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
423 		}
424 		rxd += 4;
425 		if ((u8 *)rxd - skb->data >= skb->len)
426 			return -EINVAL;
427 	}
428 
429 	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
430 		status->timestamp = le32_to_cpu(rxd[0]);
431 		status->flag |= RX_FLAG_MACTIME_START;
432 
433 		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
434 			status->flag |= RX_FLAG_AMPDU_DETAILS;
435 
436 			/* all subframes of an A-MPDU have the same timestamp */
437 			if (phy->rx_ampdu_ts != status->timestamp) {
438 				if (!++phy->ampdu_ref)
439 					phy->ampdu_ref++;
440 			}
441 			phy->rx_ampdu_ts = status->timestamp;
442 
443 			status->ampdu_ref = phy->ampdu_ref;
444 		}
445 
446 		rxd += 2;
447 		if ((u8 *)rxd - skb->data >= skb->len)
448 			return -EINVAL;
449 	}
450 
451 	/* RXD Group 3 - P-RXV */
452 	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
453 		u32 v0, v1, v2;
454 
455 		rxv = rxd;
456 		rxd += 2;
457 		if ((u8 *)rxd - skb->data >= skb->len)
458 			return -EINVAL;
459 
460 		v0 = le32_to_cpu(rxv[0]);
461 		v1 = le32_to_cpu(rxv[1]);
462 		v2 = le32_to_cpu(rxv[2]);
463 
464 		if (v0 & MT_PRXV_HT_AD_CODE)
465 			status->enc_flags |= RX_ENC_FLAG_LDPC;
466 
467 		status->chains = mphy->antenna_mask;
468 		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
469 		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
470 		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
471 		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
472 		status->signal = status->chain_signal[0];
473 
474 		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
475 			if (!(status->chains & BIT(i)))
476 				continue;
477 
478 			status->signal = max(status->signal,
479 					     status->chain_signal[i]);
480 		}
481 
482 		/* RXD Group 5 - C-RXV */
483 		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
484 			u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
485 			u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
486 			bool cck = false;
487 
488 			rxd += 18;
489 			if ((u8 *)rxd - skb->data >= skb->len)
490 				return -EINVAL;
491 
492 			idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
493 			mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
494 
495 			switch (mode) {
496 			case MT_PHY_TYPE_CCK:
497 				cck = true;
498 				fallthrough;
499 			case MT_PHY_TYPE_OFDM:
500 				i = mt76_get_rate(&dev->mt76, sband, i, cck);
501 				break;
502 			case MT_PHY_TYPE_HT_GF:
503 			case MT_PHY_TYPE_HT:
504 				status->encoding = RX_ENC_HT;
505 				if (i > 31)
506 					return -EINVAL;
507 				break;
508 			case MT_PHY_TYPE_VHT:
509 				status->nss =
510 					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
511 				status->encoding = RX_ENC_VHT;
512 				if (i > 9)
513 					return -EINVAL;
514 				break;
515 			case MT_PHY_TYPE_HE_MU:
516 				status->flag |= RX_FLAG_RADIOTAP_HE_MU;
517 				fallthrough;
518 			case MT_PHY_TYPE_HE_SU:
519 			case MT_PHY_TYPE_HE_EXT_SU:
520 			case MT_PHY_TYPE_HE_TB:
521 				status->nss =
522 					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
523 				status->encoding = RX_ENC_HE;
524 				status->flag |= RX_FLAG_RADIOTAP_HE;
525 				i &= GENMASK(3, 0);
526 
527 				if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
528 					status->he_gi = gi;
529 
530 				status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
531 				break;
532 			default:
533 				return -EINVAL;
534 			}
535 			status->rate_idx = i;
536 
537 			switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
538 			case IEEE80211_STA_RX_BW_20:
539 				break;
540 			case IEEE80211_STA_RX_BW_40:
541 				if (mode & MT_PHY_TYPE_HE_EXT_SU &&
542 				    (idx & MT_PRXV_TX_ER_SU_106T)) {
543 					status->bw = RATE_INFO_BW_HE_RU;
544 					status->he_ru =
545 						NL80211_RATE_INFO_HE_RU_ALLOC_106;
546 				} else {
547 					status->bw = RATE_INFO_BW_40;
548 				}
549 				break;
550 			case IEEE80211_STA_RX_BW_80:
551 				status->bw = RATE_INFO_BW_80;
552 				break;
553 			case IEEE80211_STA_RX_BW_160:
554 				status->bw = RATE_INFO_BW_160;
555 				break;
556 			default:
557 				return -EINVAL;
558 			}
559 
560 			status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
561 			if (mode < MT_PHY_TYPE_HE_SU && gi)
562 				status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
563 		}
564 	}
565 
566 	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
567 
568 	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
569 	status->amsdu = !!amsdu_info;
570 	if (status->amsdu) {
571 		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
572 		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
573 		if (!hdr_trans) {
574 			memmove(skb->data + 2, skb->data,
575 				ieee80211_get_hdrlen_from_skb(skb));
576 			skb_pull(skb, 2);
577 		}
578 	}
579 
580 	if (insert_ccmp_hdr && !hdr_trans) {
581 		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
582 
583 		mt76_insert_ccmp_hdr(skb, key_id);
584 	}
585 
586 	if (!hdr_trans) {
587 		hdr = mt76_skb_get_hdr(skb);
588 		fc = hdr->frame_control;
589 		if (ieee80211_is_data_qos(fc)) {
590 			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
591 			qos_ctl = *ieee80211_get_qos_ctl(hdr);
592 		}
593 	} else {
594 		status->flag &= ~(RX_FLAG_RADIOTAP_HE |
595 				  RX_FLAG_RADIOTAP_HE_MU);
596 		status->flag |= RX_FLAG_8023;
597 	}
598 
599 	if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
600 		mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);
601 
602 	if (!status->wcid || !ieee80211_is_data_qos(fc))
603 		return 0;
604 
605 	status->aggr = unicast &&
606 		       !ieee80211_is_qos_nullfunc(fc);
607 	status->qos_ctl = qos_ctl;
608 	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
609 
610 	return 0;
611 }
612 
#ifdef CONFIG_NL80211_TESTMODE
/* Testmode RX: parse a standalone RX vector report and cache the last
 * per-antenna RCPI/in-band/whole-band RSSI values, frequency offset and
 * SNR in the phy's test state.  Consumes (frees) @skb.
 */
void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt7915_phy *phy = &dev->phy;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv_hdr = rxd + 2;
	__le32 *rxv = rxd + 4;
	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
	bool ext_phy;
	s32 foe;
	u8 snr;
	int i;

	/* reports for the second band go to the extension phy */
	ext_phy = FIELD_GET(MT_RXV_HDR_BAND_IDX, le32_to_cpu(rxv_hdr[1]));
	if (ext_phy)
		phy = mt7915_ext_phy(dev);

	rcpi = le32_to_cpu(rxv[6]);
	ib_rssi = le32_to_cpu(rxv[7]);
	wb_rssi = le32_to_cpu(rxv[8]) >> 5;

	/* one byte per antenna; wb_rssi uses 9-bit stride, last entry in rxv[9] */
	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
		if (i == 3)
			wb_rssi = le32_to_cpu(rxv[9]);

		phy->test.last_rcpi[i] = rcpi & 0xff;
		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
	}

	v20 = le32_to_cpu(rxv[20]);
	v21 = le32_to_cpu(rxv[21]);

	/* frequency offset estimate is split across two dwords */
	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);

	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;

	phy->test.last_freq_offset = foe;
	phy->test.last_snr = snr;

	dev_kfree_skb(skb);
}
#endif
657 
/* Testmode TX: override rate/bandwidth fields in the TXWI for frames
 * generated by the testmode path.  No-op unless @skb is the current
 * testmode tx_skb, or when testmode support is compiled out.
 */
static void
mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
			 struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data *td = &phy->mt76->test;
	const struct ieee80211_rate *r;
	u8 bw, mode, nss = td->tx_rate_nss;
	u8 rate_idx = td->tx_rate_idx;
	u16 rateval = 0;
	u32 val;
	bool cck = false;
	int band;

	if (skb != phy->mt76->test.tx_skb)
		return;

	/* map the requested testmode rate to hardware phy mode + index */
	switch (td->tx_rate_mode) {
	case MT76_TM_TX_MODE_HT:
		/* HT MCS encodes the stream count in bits 3+ */
		nss = 1 + (rate_idx >> 3);
		mode = MT_PHY_TYPE_HT;
		break;
	case MT76_TM_TX_MODE_VHT:
		mode = MT_PHY_TYPE_VHT;
		break;
	case MT76_TM_TX_MODE_HE_SU:
		mode = MT_PHY_TYPE_HE_SU;
		break;
	case MT76_TM_TX_MODE_HE_EXT_SU:
		mode = MT_PHY_TYPE_HE_EXT_SU;
		break;
	case MT76_TM_TX_MODE_HE_TB:
		mode = MT_PHY_TYPE_HE_TB;
		break;
	case MT76_TM_TX_MODE_HE_MU:
		mode = MT_PHY_TYPE_HE_MU;
		break;
	case MT76_TM_TX_MODE_CCK:
		cck = true;
		fallthrough;
	case MT76_TM_TX_MODE_OFDM:
		band = phy->mt76->chandef.chan->band;
		/* 2 GHz bitrate table starts with 4 CCK entries */
		if (band == NL80211_BAND_2GHZ && !cck)
			rate_idx += 4;

		/* hw_value packs phy mode (high byte) and rate index */
		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
		val = cck ? r->hw_value_short : r->hw_value;

		mode = val >> 8;
		rate_idx = val & 0xff;
		break;
	default:
		mode = MT_PHY_TYPE_OFDM;
		break;
	}

	switch (phy->mt76->chandef.width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		break;
	case NL80211_CHAN_WIDTH_80:
		bw = 2;
		break;
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		bw = 3;
		break;
	default:
		bw = 0;
		break;
	}

	/* STBC needs a second spatial stream */
	if (td->tx_rate_stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);

	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

	/* single transmission attempt; no BA for legacy rates */
	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);

	val = MT_TXD6_FIXED_BW |
	      FIELD_PREP(MT_TXD6_BW, bw) |
	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);

	/* for HE_SU/HE_EXT_SU PPDU
	 * - 1x, 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_MU PPDU
	 * - 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_TB PPDU
	 * - 1x, 2x LTF + 1.6us GI
	 * - 4x LTF + 3.2us GI
	 */
	if (mode >= MT_PHY_TYPE_HE_SU)
		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);

	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
		val |= MT_TXD6_LDPC;

	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
	txwi[6] |= cpu_to_le32(val);
	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
					  phy->test.spe_idx));
#endif
}
772 
773 static void
774 mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
775 			   struct sk_buff *skb, struct mt76_wcid *wcid)
776 {
777 
778 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
779 	u8 fc_type, fc_stype;
780 	bool wmm = false;
781 	u32 val;
782 
783 	if (wcid->sta) {
784 		struct ieee80211_sta *sta;
785 
786 		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
787 		wmm = sta->wme;
788 	}
789 
790 	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
791 	      FIELD_PREP(MT_TXD1_TID, tid);
792 
793 	if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
794 		val |= MT_TXD1_ETH_802_3;
795 
796 	txwi[1] |= cpu_to_le32(val);
797 
798 	fc_type = IEEE80211_FTYPE_DATA >> 2;
799 	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
800 
801 	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
802 	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
803 
804 	txwi[2] |= cpu_to_le32(val);
805 
806 	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
807 	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
808 	txwi[7] |= cpu_to_le32(val);
809 }
810 
/* Fill the native-802.11 specific TXWI fields: header format/length,
 * TID (including ADDBA/BAR special cases), frame type/subtype, BIP
 * protection, fixed-rate selection for non-data/multicast frames, and
 * sequence number handling for injected frames.
 */
static void
mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* ADDBA requests and BAR frames carry their TID in the frame body,
	 * not in skb->priority
	 */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

		txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
		tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
		u16 control = le16_to_cpu(bar->control);

		tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
	}

	/* header length is encoded in units of 2 bytes */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);
	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);

	/* BIP-protected group management frames use the BIP flag instead
	 * of the generic protect-frame bit
	 */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD2_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	/* mgmt/ctrl and multicast frames go out at a fixed rate */
	if (!ieee80211_is_data(fc) || multicast)
		val |= MT_TXD2_FIX_RATE;

	txwi[2] |= cpu_to_le32(val);

	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	/* injected frames keep the sequence number chosen by the injector */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}
886 
/* Build the TXWI (TX descriptor) for @skb.
 *
 * Fills the common TXD dwords (queue/packet format, wcid, omac, power
 * management, retry count), then delegates the frame-format specific
 * fields to the 802.3 or 802.11 helper, applies the default fixed rate
 * where requested, and finally lets testmode override the rate fields.
 */
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 tx_count = 15;
	u32 val;

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	/* pick packet format and hardware queue:
	 * beacons go through the firmware beacon queue, PSD and above map
	 * to the ALTX queue, everything else to the per-vif WMM queues
	 */
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
			mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	/* tag frames on the second phy's management/beacon queues */
	if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
		val |= MT_TXD1_TGID;

	txwi[1] = cpu_to_le32(val);

	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;
	txwi[5] = 0;
	txwi[6] = 0;
	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;

	if (is_8023)
		mt7915_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7915_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
		u16 rate;

		/* hardware won't add HTC for mgmt/ctrl frame */
		txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			rate = MT7915_5G_RATE_DEFAULT;
		else
			rate = MT7915_2G_RATE_DEFAULT;

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}

	if (mt76_testmode_enabled(mphy))
		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
976 
/* mt76 tx_prepare_skb hook: write the TXWI, build the cut-through TXP
 * (buffer list + metadata handed to the firmware) and allocate a token
 * that ties the DMA completion back to this frame.
 *
 * Returns 0 on success, -EINVAL for undersized frames, or a negative
 * error from token allocation.
 */
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
	struct mt76_txwi_cache *t;
	struct mt7915_txp *txp;
	int id, i, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      false);

	cb->wcid = wcid->idx;

	/* TXP follows the TXWI; buf[0] holds the descriptor itself, so the
	 * fragment list starts at buf[1]
	 */
	txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	/* stash the skb in the txwi cache and get a completion token */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	txp->token = cpu_to_le16(id);
	/* 0x3ff marks "no WDS wcid" for non-4-address stations */
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
1047 
1048 static void
1049 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
1050 {
1051 	struct mt7915_sta *msta;
1052 	u16 fc, tid;
1053 	u32 val;
1054 
1055 	if (!sta || !sta->ht_cap.ht_supported)
1056 		return;
1057 
1058 	tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
1059 	if (tid >= 6) /* skip VO queue */
1060 		return;
1061 
1062 	val = le32_to_cpu(txwi[2]);
1063 	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
1064 	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
1065 	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
1066 		return;
1067 
1068 	msta = (struct mt7915_sta *)sta->drv_priv;
1069 	if (!test_and_set_bit(tid, &msta->ampdu_state))
1070 		ieee80211_start_tx_ba_session(sta, tid, 0);
1071 }
1072 
/* Report tx status for a completed frame to mac80211.
 *
 * @stat: non-zero hw failure indication; clears the status info first.
 * @free_list: optional list for deferred skb freeing (batched by caller).
 *
 * NOTE(review): IEEE80211_TX_STAT_ACK is set below even when @stat is
 * non-zero (after the clear) — confirm this matches the fw's failure
 * reporting semantics.
 */
static void
mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
			  struct ieee80211_sta *sta, u8 stat,
			  struct list_head *free_list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = info,
		.skb = skb,
		.free_list = free_list,
	};
	struct ieee80211_hw *hw;

	if (sta) {
		struct mt7915_sta *msta;

		msta = (struct mt7915_sta *)sta->drv_priv;
		status.rate = &msta->stats.tx_rate;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* testmode frames bypass normal status reporting entirely;
	 * mt76_is_testmode_skb() also initializes hw on a match */
	if (mt76_is_testmode_skb(mdev, skb, &hw)) {
		struct mt7915_phy *phy = mt7915_hw_phy(hw);
		struct ieee80211_vif *vif = phy->monitor_vif;
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		mt76_tx_complete_skb(mdev, mvif->sta.wcid.idx, skb);
		return;
	}
#endif

	hw = mt76_tx_status_get_hw(mdev, skb);

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		info->flags |= IEEE80211_TX_STAT_AMPDU;

	if (stat)
		ieee80211_tx_info_clear_status(info);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.tx_time = 0;
	ieee80211_tx_status_ext(hw, &status);
}
1119 
1120 void mt7915_txp_skb_unmap(struct mt76_dev *dev,
1121 			  struct mt76_txwi_cache *t)
1122 {
1123 	struct mt7915_txp *txp;
1124 	int i;
1125 
1126 	txp = mt7915_txwi_to_txp(dev, t);
1127 	for (i = 0; i < txp->nbuf; i++)
1128 		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
1129 				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
1130 }
1131 
/* Handle a firmware tx-free event: release completed tx tokens, report
 * status to mac80211 and schedule station polling.
 *
 * The event payload is a sequence of 32-bit entries: a "wcid pair" entry
 * selects the station for the msdu entries that follow it.
 */
void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy_ext = mdev->phy2;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *tmp;
	u8 i, count;
	bool wake = false;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (mphy_ext) {
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
	}

	/*
	 * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
	 * to the time ack is received or dropped by hw (air + hw queue time).
	 * Should avoid accessing WTBL to get Tx airtime, and use it instead.
	 */
	count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/*
		 * 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7915_sta *msta;
			struct mt7915_phy *phy;
			struct mt76_wcid *wcid;
			u16 idx;

			/* a pair entry takes a slot of its own, so extend
			 * the loop bound to still consume 'count' msdus */
			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* queue the station for stats/poll work */
			msta = container_of(wcid, struct mt7915_sta, wcid);
			phy = msta->vif->phy;
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->stats_list))
				list_add_tail(&msta->stats_list, &phy->stats_list);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		/* map the fw token back to the pending txwi/skb */
		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7915_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
			void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);

			/* don't aggregate EAPOL frames */
			if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
				mt7915_tx_check_aggr(sta, txwi_ptr);

			/* non-AQL frames keep their own pending counter;
			 * clamp it at zero on underflow */
			if (sta && !info->tx_time_est) {
				struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
				int pending;

				pending = atomic_dec_return(&wcid->non_aql_packets);
				if (pending < 0)
					atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
			}

			mt7915_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}

	mt7915_mac_sta_poll(dev);

	/* token space freed up again: unblock and restart tx scheduling */
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	napi_consume_skb(skb, 1);

	/* free the batched status skbs collected above */
	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
1235 
/* DMA tx completion callback for a queue entry; normally frames complete
 * through the fw tx-free event, so this mostly covers the error path.
 */
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7915_dev *dev;

	/* entries without a txwi carry a plain skb (e.g. mcu frames) */
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	dev = container_of(mdev, struct mt7915_dev, mt76);

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7915_txp *txp;

		/* recover the real skb from the token table */
		txp = mt7915_txwi_to_txp(mdev, e->txwi);
		t = mt76_token_put(mdev, le16_to_cpu(txp->token));
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);

		mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
					  NULL);
	}
}
1267 
/* Restart the CCA statistics counters for @phy by toggling the
 * statistics-count enable in the rx control register.
 */
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);

	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	/* NOTE(review): magic bits — presumably the counter enable bits
	 * within STSCNT_EN; confirm against the datasheet and replace
	 * with named masks */
	mt76_set(dev, reg, BIT(11) | BIT(9));
}
1277 
/* Reset per-phy statistics: aggregation counters, survey time base and
 * airtime counters. The MIB registers are clear-on-read, so reading them
 * here discards the accumulated values.
 */
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int i;

	/* dummy reads clear the hw aggregation counters */
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
	}

	/* each phy owns half of the shared aggr_stats array */
	if (ext_phy) {
		dev->mt76.phy2->survey_time = ktime_get_boottime();
		i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
	} else {
		dev->mt76.phy.survey_time = ktime_get_boottime();
		i = 0;
	}
	memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(ext_phy));
	mt76_rr(dev, MT_MIB_SDR36(ext_phy));
	mt76_rr(dev, MT_MIB_SDR37(ext_phy));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(ext_phy),
		 MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
		 MT_WF_RMAC_MIB_RXTIME_CLR);
}
1308 
/* Program MAC timing parameters (SIFS/SLOT/EIFS, CCA timeouts, CF-End
 * rate) for @phy, extending the timeouts by the configured coverage
 * class. Tx/rx are disabled around the update to avoid inconsistent
 * timing while the registers change.
 */
void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* SIFS: 16us on 5 GHz (OFDM), 10us on 2.4 GHz */
	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	/* both bands share the timing hw, so use the larger coverage
	 * class of the two phys */
	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
	} else {
		struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
	}
	mt76_set(dev, MT_ARB_SCR(ext_phy),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	/* 3us of extra timeout per coverage class step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* CF-End uses the 11b rate only with long slots on 2.4 GHz */
	if (phy->slottime < 20 || is_5ghz)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(ext_phy),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
1365 
/* Enable noise-floor (IPI/IRPI) measurement for the given phy so that
 * mt7915_phy_get_nf() has histogram data to read.
 */
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
	/* clear the IRPI counters on read */
	mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy),
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);

	/* NOTE(review): 0x5 selects an IPI measurement mode — confirm
	 * the exact meaning against the datasheet */
	mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy),
		 FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
1375 
1376 static u8
1377 mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
1378 {
1379 	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1380 	struct mt7915_dev *dev = phy->dev;
1381 	u32 val, sum = 0, n = 0;
1382 	int nss, i;
1383 
1384 	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
1385 		u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support));
1386 
1387 		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1388 			val = mt76_rr(dev, reg);
1389 			sum += val * nf_power[i];
1390 			n += val;
1391 		}
1392 	}
1393 
1394 	if (!n)
1395 		return 0;
1396 
1397 	return sum / n;
1398 }
1399 
/* Accumulate per-channel survey statistics (busy/tx/rx/obss airtime) and
 * update the running noise-floor estimate for one band.
 */
static void
mt7915_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76);
	struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
				   MT_MIB_OBSSTIME_MASK);

	/* exponential moving average of the noise floor; phy->noise is
	 * kept in fixed point with 4 fractional bits */
	nf = mt7915_phy_get_nf(phy, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	/* nf values are magnitudes; report a negative dBm to mac80211 */
	state->noise = -(phy->noise >> 4);
}
1431 
/* mt76 survey hook: update channel statistics for both bands, then clear
 * the OBSS airtime counters so the next interval starts from zero.
 */
void mt7915_update_channel(struct mt76_dev *mdev)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);

	mt7915_phy_update_channel(&mdev->phy, 0);
	if (mdev->phy2)
		mt7915_phy_update_channel(mdev->phy2, 1);

	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	if (mdev->phy2)
		mt76_set(dev, MT_WF_RMAC_MIB_TIME0(1),
			 MT_WF_RMAC_MIB_RXTIME_CLR);
}
1446 
1447 static bool
1448 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
1449 {
1450 	bool ret;
1451 
1452 	ret = wait_event_timeout(dev->reset_wait,
1453 				 (READ_ONCE(dev->reset_state) & state),
1454 				 MT7915_RESET_TIMEOUT);
1455 
1456 	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1457 	return ret;
1458 }
1459 
1460 static void
1461 mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1462 {
1463 	struct ieee80211_hw *hw = priv;
1464 
1465 	switch (vif->type) {
1466 	case NL80211_IFTYPE_MESH_POINT:
1467 	case NL80211_IFTYPE_ADHOC:
1468 	case NL80211_IFTYPE_AP:
1469 		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
1470 		break;
1471 	default:
1472 		break;
1473 	}
1474 }
1475 
1476 static void
1477 mt7915_update_beacons(struct mt7915_dev *dev)
1478 {
1479 	ieee80211_iterate_active_interfaces(dev->mt76.hw,
1480 		IEEE80211_IFACE_ITER_RESUME_ALL,
1481 		mt7915_update_vif_beacon, dev->mt76.hw);
1482 
1483 	if (!dev->mt76.phy2)
1484 		return;
1485 
1486 	ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
1487 		IEEE80211_IFACE_ITER_RESUME_ALL,
1488 		mt7915_update_vif_beacon, dev->mt76.phy2->hw);
1489 }
1490 
/* Full WFDMA reset during recovery: stop both DMA engines (and the
 * second PCIe interface if present), drain all tx/rx/mcu queues,
 * restore prefetch settings and re-enable DMA. The ordering here is
 * hardware-mandated; do not reorder.
 */
static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phy2;
	u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
	int i;

	/* stop tx/rx DMA on both engines */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
	mt76_clear(dev, MT_WFDMA1_GLO_CFG,
		   MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			    MT_WFDMA0_GLO_CFG_RX_DMA_EN));
		mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
			   (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			    MT_WFDMA1_GLO_CFG_RX_DMA_EN));
	}

	/* let in-flight DMA drain before touching the queues */
	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	/* flush any pending tx status entries */
	mt76_tx_status_check(&dev->mt76, NULL, true);

	/* re-init prefetch settings after reset */
	mt7915_dma_prefetch(dev);

	/* restart tx/rx DMA */
	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
	mt76_set(dev, MT_WFDMA1_GLO_CFG,
		 MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			(MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN));
		mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
			(MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_RX_DMA_EN));
	}
}
1543 
/* Drop every outstanding tx token: unmap the DMA buffers, free the
 * pending skbs back to mac80211 and tear down the token idr. Used
 * during recovery/teardown when the fw will never complete them.
 */
void mt7915_tx_token_put(struct mt7915_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7915_txp_skb_unmap(&dev->mt76, txwi);
		if (txwi->skb) {
			struct ieee80211_hw *hw;

			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
			ieee80211_free_txskb(hw, txwi->skb);
		}
		mt76_put_txwi(&dev->mt76, txwi);
		/* keep the bookkeeping in sync while entries are removed */
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
1564 
/* system error recovery
 *
 * Full chip recovery handshake with the MCU: quiesce mac80211 and all
 * driver contexts, acknowledge DMA stop, reset WFDMA and the token
 * table, then bring everything back up and restore beacon templates.
 * The MT_MCU_INT_EVENT writes and the wait_reset_state() calls form a
 * strict request/ack protocol with the firmware.
 */
void mt7915_mac_reset_work(struct work_struct *work)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7915_dev *dev;

	dev = container_of(work, struct mt7915_dev, reset_work);
	ext_phy = dev->mt76.phy2;
	phy2 = ext_phy ? ext_phy->priv : NULL;

	/* only act on a fw-initiated DMA-stop request */
	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	/* stop all software tx/rx contexts before touching DMA */
	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.napi[2]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* tell the fw DMA is stopped, then wait for it to finish reset */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7915_dma_reset(dev);

		/* all pending tokens are stale after the DMA reset */
		mt7915_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	/* restart tx/rx processing */
	mt76_worker_enable(&dev->mt76.tx_worker);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	napi_enable(&dev->mt76.napi[2]);
	napi_schedule(&dev->mt76.napi[2]);

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mutex_unlock(&dev->mt76.mutex);

	/* the fw lost all beacon templates during reset */
	mt7915_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work,
					     MT7915_WATCHDOG_TIME);
}
1652 
/* Accumulate MIB counters (FCS errors, BA misses, ack failures, RTS
 * stats) and per-AC aggregation histograms for one phy. The hw counters
 * are clear-on-read, so each read yields the delta since the last poll.
 */
static void
mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i, aggr0, aggr1;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					   MT_MIB_SDR3_FCS_ERR_MASK);

	/* each phy owns half of the shared aggr_stats array; AGG_CNT and
	 * AGG_CNT2 fill the lower/upper quarters of that half */
	aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
		u32 val;

		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		mib->ack_fail_cnt +=
			FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);

		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
		mib->rts_retries_cnt +=
			FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);

		/* each register packs two 16-bit histogram buckets */
		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;

		val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
		dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val >> 16;
	}
}
1687 
/* Fetch tx-rate info from the MCU for every station queued on the phy's
 * stats list. The lock is dropped around the (sleeping) MCU call and
 * re-taken to fetch the next entry, so new stations queued meanwhile on
 * phy->stats_list are picked up on the next pass.
 */
static void
mt7915_mac_sta_stats_work(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_sta *msta;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&phy->stats_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, stats_list);
		list_del_init(&msta->stats_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* use MT_TX_FREE_RATE to report Tx rate for further devices */
		mt7915_mcu_get_tx_rate(dev, RATE_CTRL_RU_INFO, msta->wcid.idx);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
1711 
/* Rate-control work: push pending rate/SMPS updates to the MCU for all
 * stations queued on dev->sta_rc_list. The spinlock only guards list
 * and 'changed' manipulation; it is dropped around the sleeping MCU
 * calls.
 */
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		/* snapshot and clear the change mask under the lock */
		changed = msta->stats.changed;
		msta->stats.changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		/* drv_priv is embedded, so recover the mac80211 objects */
		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7915_mcu_add_rate_ctrl(dev, vif, sta);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
1747 
/* Periodic per-phy watchdog (runs every MT7915_WATCHDOG_TIME): updates
 * survey data each tick, MIB stats every 5th tick and station rate
 * stats every 10th tick, then re-arms itself.
 */
void mt7915_mac_work(struct work_struct *work)
{
	struct mt7915_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy->dev);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7915_mac_update_mib_stats(phy);
	}

	if (++phy->sta_work_count == 10) {
		phy->sta_work_count = 0;
		mt7915_mac_sta_stats_work(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7915_WATCHDOG_TIME);
}
1776 
1777 static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
1778 {
1779 	struct mt7915_dev *dev = phy->dev;
1780 
1781 	if (phy->rdd_state & BIT(0))
1782 		mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
1783 	if (phy->rdd_state & BIT(1))
1784 		mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
1785 }
1786 
1787 static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
1788 {
1789 	int err;
1790 
1791 	err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
1792 	if (err < 0)
1793 		return err;
1794 
1795 	return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
1796 }
1797 
/* Begin the CAC period and start the radar detector chain(s) for the
 * phy's current channel. 160 MHz and 80+80 channels additionally need
 * the second RDD chain. Started chains are recorded in phy->rdd_state
 * so mt7915_dfs_stop_radar_detector() can undo them.
 */
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	/* chain index matches the phy index (0 = main, 1 = ext) */
	err = mt7915_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7915_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}
1827 
/* Program the region-specific radar pulse thresholds and pattern
 * tables into the MCU. FCC additionally needs the long-pulse (type 5)
 * threshold set. Returns 0, a negative MCU error, or -EINVAL for an
 * unknown DFS region.
 */
static int
mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
{
	const struct mt7915_dfs_radar_spec *radar_specs;
	struct mt7915_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	/* push each radar pattern entry individually */
	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7915_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}
1861 
/* Reconcile the radar detector with the current channel's DFS state:
 * start CAC/detection on unavailable DFS channels, end CAC on available
 * ones, and fall back to normal rx (stopping any running detector) when
 * no DFS handling is required. phy->dfs_state caches the last programmed
 * state (-1 = invalid/forced reprogram).
 */
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (dev->mt76.region == NL80211_DFS_UNSET) {
		phy->dfs_state = -1;
		if (phy->rdd_state)
			goto stop;

		return 0;
	}

	/* don't reprogram while scanning off-channel */
	if (test_bit(MT76_SCANNING, &phy->mt76->state))
		return 0;

	/* nothing to do if the hw already matches the channel state */
	if (phy->dfs_state == chandef->chan->dfs_state)
		return 0;

	err = mt7915_dfs_init_radar_specs(phy);
	if (err < 0) {
		phy->dfs_state = -1;
		goto stop;
	}

	phy->dfs_state = chandef->chan->dfs_state;

	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
			return mt7915_dfs_start_radar_detector(phy);

		/* channel already cleared CAC: just end the CAC period */
		return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
					  MT_RX_SEL0, 0);
	}

stop:
	err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7915_dfs_stop_radar_detector(phy);
	return 0;
}
1908