xref: /linux/drivers/net/wireless/mediatek/mt76/mt7921/mac.c (revision 1d8efc741df80be940e1584b5ac613dc03d58bd6)
1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/timekeeping.h>
6 #include "mt7921.h"
7 #include "../dma.h"
8 #include "mac.h"
9 
/* Map a raw RCPI register field to a signal level in dBm: (raw - 220) / 2. */
#define to_rssi(field, rxv)	((FIELD_GET(field, rxv) - 220) / 2)

/* Shorthand for a radiotap HE known/value flag, in little-endian form. */
#define HE_BITS(f)		cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
/* Extract C-RXV field MT_CRXV_HE_##m from dword @v and re-encode it into
 * radiotap HE field IEEE80211_RADIOTAP_HE_##f.
 */
#define HE_PREP(f, m, v)	le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
						 IEEE80211_RADIOTAP_HE_##f)
15 
16 static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
17 					    u16 idx, bool unicast)
18 {
19 	struct mt7921_sta *sta;
20 	struct mt76_wcid *wcid;
21 
22 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
23 		return NULL;
24 
25 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
26 	if (unicast || !wcid)
27 		return wcid;
28 
29 	if (!wcid->sta)
30 		return NULL;
31 
32 	sta = container_of(wcid, struct mt7921_sta, wcid);
33 	if (!sta->vif)
34 		return NULL;
35 
36 	return &sta->vif->sta.wcid;
37 }
38 
/* mt76 ->sta_ps hook.  Intentionally a no-op: no driver-side work is done
 * here for power-save transitions (presumably handled elsewhere/in firmware
 * — the hook exists to satisfy the mt76 driver interface).
 */
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
42 
/* Trigger a WTBL update for entry @idx with the action bits in @mask and
 * poll (up to 5ms) for the hardware busy flag to clear.
 *
 * Returns true when the update completed, false on poll timeout.
 */
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}
51 
/* Select the WTBL group containing @wcid (groups of 128 entries) and
 * return the register offset of that entry's LMAC WTBL data.
 */
static u32 mt7921_mac_wtbl_lmac_addr(struct mt7921_dev *dev, u16 wcid)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, 0);
}
59 
/* Drain dev->sta_poll_list and, for each queued station, read the per-AC
 * tx/rx airtime counters from its WTBL entry, then report the deltas since
 * the previous poll to mac80211 for airtime fairness accounting.
 */
static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
{
	/* TID used when reporting airtime for each AC */
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7921_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	/* grab the whole pending list at once to keep the lock hold short */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr;
		u16 idx;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7921_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		/* airtime counters start at dword 20 of the LMAC WTBL entry */
		addr = mt7921_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			/* airtime_ac[0..3] = tx, airtime_ac[4..7] = rx */
			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* request a hw counter reset before they can wrap */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7921_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			/* counters are indexed by hw (LMAC) queue, not AC */
			u8 q = mt7921_lmac_mapping(dev, i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}
141 
142 static void
143 mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
144 				 struct ieee80211_radiotap_he *he,
145 				 __le32 *rxv)
146 {
147 	u32 ru_h, ru_l;
148 	u8 ru, offs = 0;
149 
150 	ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
151 	ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
152 	ru = (u8)(ru_l | ru_h << 4);
153 
154 	status->bw = RATE_INFO_BW_HE_RU;
155 
156 	switch (ru) {
157 	case 0 ... 36:
158 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
159 		offs = ru;
160 		break;
161 	case 37 ... 52:
162 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
163 		offs = ru - 37;
164 		break;
165 	case 53 ... 60:
166 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
167 		offs = ru - 53;
168 		break;
169 	case 61 ... 64:
170 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
171 		offs = ru - 61;
172 		break;
173 	case 65 ... 66:
174 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
175 		offs = ru - 65;
176 		break;
177 	case 67:
178 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
179 		break;
180 	case 68:
181 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
182 		break;
183 	}
184 
185 	he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
186 	he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
187 		     le16_encode_bits(offs,
188 				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
189 }
190 
/* Build an ieee80211_radiotap_he header in front of the frame from the
 * C-RXV words, filling the fields appropriate for the HE PPDU format
 * given in @phy (SU / EXT_SU / MU / trigger-based).
 */
static void
mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
			      struct mt76_rx_status *status,
			      __le32 *rxv, u32 phy)
{
	/* TODO: struct ieee80211_radiotap_he_mu */
	/* "known" bitmap template common to all HE formats */
	static const struct ieee80211_radiotap_he known = {
		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
			 HE_BITS(DATA1_STBC_KNOWN) |
			 HE_BITS(DATA1_CODING_KNOWN) |
			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
			 HE_BITS(DATA1_DOPPLER_KNOWN) |
			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
		.data2 = HE_BITS(DATA2_GI_KNOWN) |
			 HE_BITS(DATA2_TXBF_KNOWN) |
			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
			 HE_BITS(DATA2_TXOP_KNOWN),
	};
	struct ieee80211_radiotap_he *he = NULL;
	/* LTF size field is encoded off-by-one in the C-RXV */
	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;

	/* prepend the radiotap HE header to the skb */
	he = skb_push(skb, sizeof(known));
	memcpy(he, &known, sizeof(known));

	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
		    le16_encode_bits(ltf_size,
				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);

	switch (phy) {
	case MT_PHY_TYPE_HE_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
		break;
	case MT_PHY_TYPE_HE_EXT_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_MU:
		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);

		/* MU PPDUs carry an RU allocation; decode it too */
		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	case MT_PHY_TYPE_HE_TB:
		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);

		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);

		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	default:
		break;
	}
}
269 
270 static void
271 mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
272 			    struct mt76_rx_status *status, u8 chfreq)
273 {
274 	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
275 	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
276 	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
277 		status->freq = mphy->chandef.chan->center_freq;
278 		status->band = mphy->chandef.chan->band;
279 		return;
280 	}
281 
282 	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
283 	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
284 }
285 
286 int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
287 {
288 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
289 	struct mt76_phy *mphy = &dev->mt76.phy;
290 	struct mt7921_phy *phy = &dev->phy;
291 	struct ieee80211_supported_band *sband;
292 	struct ieee80211_hdr *hdr;
293 	__le32 *rxd = (__le32 *)skb->data;
294 	__le32 *rxv = NULL;
295 	u32 mode = 0;
296 	u32 rxd1 = le32_to_cpu(rxd[1]);
297 	u32 rxd2 = le32_to_cpu(rxd[2]);
298 	u32 rxd3 = le32_to_cpu(rxd[3]);
299 	bool unicast, insert_ccmp_hdr = false;
300 	u8 remove_pad;
301 	int i, idx;
302 	u8 chfreq;
303 
304 	memset(status, 0, sizeof(*status));
305 
306 	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
307 		return -EINVAL;
308 
309 	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
310 		return -EINVAL;
311 
312 	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
313 	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
314 	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
315 	status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
316 
317 	if (status->wcid) {
318 		struct mt7921_sta *msta;
319 
320 		msta = container_of(status->wcid, struct mt7921_sta, wcid);
321 		spin_lock_bh(&dev->sta_poll_lock);
322 		if (list_empty(&msta->poll_list))
323 			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
324 		spin_unlock_bh(&dev->sta_poll_lock);
325 	}
326 
327 	mt7921_get_status_freq_info(dev, mphy, status, chfreq);
328 
329 	if (status->band == NL80211_BAND_5GHZ)
330 		sband = &mphy->sband_5g.sband;
331 	else
332 		sband = &mphy->sband_2g.sband;
333 
334 	if (!sband->channels)
335 		return -EINVAL;
336 
337 	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
338 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
339 
340 	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
341 		status->flag |= RX_FLAG_MMIC_ERROR;
342 
343 	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
344 	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
345 		status->flag |= RX_FLAG_DECRYPTED;
346 		status->flag |= RX_FLAG_IV_STRIPPED;
347 		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
348 	}
349 
350 	if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
351 		status->flag |= RX_FLAG_AMPDU_DETAILS;
352 
353 		/* all subframes of an A-MPDU have the same timestamp */
354 		if (phy->rx_ampdu_ts != rxd[14]) {
355 			if (!++phy->ampdu_ref)
356 				phy->ampdu_ref++;
357 		}
358 		phy->rx_ampdu_ts = rxd[14];
359 
360 		status->ampdu_ref = phy->ampdu_ref;
361 	}
362 
363 	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
364 
365 	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
366 		return -EINVAL;
367 
368 	rxd += 6;
369 	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
370 		rxd += 4;
371 		if ((u8 *)rxd - skb->data >= skb->len)
372 			return -EINVAL;
373 	}
374 
375 	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
376 		u8 *data = (u8 *)rxd;
377 
378 		if (status->flag & RX_FLAG_DECRYPTED) {
379 			status->iv[0] = data[5];
380 			status->iv[1] = data[4];
381 			status->iv[2] = data[3];
382 			status->iv[3] = data[2];
383 			status->iv[4] = data[1];
384 			status->iv[5] = data[0];
385 
386 			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
387 		}
388 		rxd += 4;
389 		if ((u8 *)rxd - skb->data >= skb->len)
390 			return -EINVAL;
391 	}
392 
393 	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
394 		rxd += 2;
395 		if ((u8 *)rxd - skb->data >= skb->len)
396 			return -EINVAL;
397 	}
398 
399 	/* RXD Group 3 - P-RXV */
400 	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
401 		u32 v0, v1, v2;
402 
403 		rxv = rxd;
404 		rxd += 2;
405 		if ((u8 *)rxd - skb->data >= skb->len)
406 			return -EINVAL;
407 
408 		v0 = le32_to_cpu(rxv[0]);
409 		v1 = le32_to_cpu(rxv[1]);
410 		v2 = le32_to_cpu(rxv[2]);
411 
412 		if (v0 & MT_PRXV_HT_AD_CODE)
413 			status->enc_flags |= RX_ENC_FLAG_LDPC;
414 
415 		status->chains = mphy->antenna_mask;
416 		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
417 		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
418 		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
419 		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
420 		status->signal = status->chain_signal[0];
421 
422 		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
423 			if (!(status->chains & BIT(i)))
424 				continue;
425 
426 			status->signal = max(status->signal,
427 					     status->chain_signal[i]);
428 		}
429 
430 		/* RXD Group 5 - C-RXV */
431 		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
432 			u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
433 			u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
434 			bool cck = false;
435 
436 			rxd += 18;
437 			if ((u8 *)rxd - skb->data >= skb->len)
438 				return -EINVAL;
439 
440 			idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
441 			mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
442 
443 			switch (mode) {
444 			case MT_PHY_TYPE_CCK:
445 				cck = true;
446 				fallthrough;
447 			case MT_PHY_TYPE_OFDM:
448 				i = mt76_get_rate(&dev->mt76, sband, i, cck);
449 				break;
450 			case MT_PHY_TYPE_HT_GF:
451 			case MT_PHY_TYPE_HT:
452 				status->encoding = RX_ENC_HT;
453 				if (i > 31)
454 					return -EINVAL;
455 				break;
456 			case MT_PHY_TYPE_VHT:
457 				status->nss =
458 					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
459 				status->encoding = RX_ENC_VHT;
460 				if (i > 9)
461 					return -EINVAL;
462 				break;
463 			case MT_PHY_TYPE_HE_MU:
464 				status->flag |= RX_FLAG_RADIOTAP_HE_MU;
465 				fallthrough;
466 			case MT_PHY_TYPE_HE_SU:
467 			case MT_PHY_TYPE_HE_EXT_SU:
468 			case MT_PHY_TYPE_HE_TB:
469 				status->nss =
470 					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
471 				status->encoding = RX_ENC_HE;
472 				status->flag |= RX_FLAG_RADIOTAP_HE;
473 				i &= GENMASK(3, 0);
474 
475 				if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
476 					status->he_gi = gi;
477 
478 				status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
479 				break;
480 			default:
481 				return -EINVAL;
482 			}
483 			status->rate_idx = i;
484 
485 			switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
486 			case IEEE80211_STA_RX_BW_20:
487 				break;
488 			case IEEE80211_STA_RX_BW_40:
489 				if (mode & MT_PHY_TYPE_HE_EXT_SU &&
490 				    (idx & MT_PRXV_TX_ER_SU_106T)) {
491 					status->bw = RATE_INFO_BW_HE_RU;
492 					status->he_ru =
493 						NL80211_RATE_INFO_HE_RU_ALLOC_106;
494 				} else {
495 					status->bw = RATE_INFO_BW_40;
496 				}
497 				break;
498 			case IEEE80211_STA_RX_BW_80:
499 				status->bw = RATE_INFO_BW_80;
500 				break;
501 			case IEEE80211_STA_RX_BW_160:
502 				status->bw = RATE_INFO_BW_160;
503 				break;
504 			default:
505 				return -EINVAL;
506 			}
507 
508 			status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
509 			if (mode < MT_PHY_TYPE_HE_SU && gi)
510 				status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
511 		}
512 	}
513 
514 	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
515 
516 	if (insert_ccmp_hdr) {
517 		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
518 
519 		mt76_insert_ccmp_hdr(skb, key_id);
520 	}
521 
522 	if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
523 		mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
524 
525 	hdr = mt76_skb_get_hdr(skb);
526 	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
527 		return 0;
528 
529 	status->aggr = unicast &&
530 		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
531 	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
532 	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
533 
534 	return 0;
535 }
536 
537 static void
538 mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
539 			   struct sk_buff *skb, struct mt76_wcid *wcid)
540 {
541 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
542 	u8 fc_type, fc_stype;
543 	bool wmm = false;
544 	u32 val;
545 
546 	if (wcid->sta) {
547 		struct ieee80211_sta *sta;
548 
549 		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
550 		wmm = sta->wme;
551 	}
552 
553 	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
554 	      FIELD_PREP(MT_TXD1_TID, tid);
555 
556 	if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
557 		val |= MT_TXD1_ETH_802_3;
558 
559 	txwi[1] |= cpu_to_le32(val);
560 
561 	fc_type = IEEE80211_FTYPE_DATA >> 2;
562 	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
563 
564 	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
565 	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
566 
567 	txwi[2] |= cpu_to_le32(val);
568 
569 	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
570 	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
571 	txwi[7] |= cpu_to_le32(val);
572 }
573 
/* Fill the TXD fields specific to native 802.11 frames: header format and
 * length, TID (including BA-session special cases), type/subtype,
 * multicast/BIP handling, beacon flags and injected-frame sequence
 * numbers.
 */
static void
mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* ADDBA requests and BAR frames carry their TID in the frame body
	 * rather than in skb->priority
	 */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

		txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
		tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
		u16 control = le16_to_cpu(bar->control);

		tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
	}

	/* header length is encoded in units of 2 bytes */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);
	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);

	/* BIP-protected group mgmt frames: hw does BIP, not frame protect */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD2_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	/* mgmt/ctrl and multicast frames go out at a fixed rate */
	if (!ieee80211_is_data(fc) || multicast)
		val |= MT_TXD2_FIX_RATE;

	txwi[2] |= cpu_to_le32(val);

	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	/* injected frames keep their caller-supplied sequence number */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}
649 
/* Build the common part of the TX descriptor (TXD) for @skb, then delegate
 * the format-specific fields to the 802.3 or 802.11 helper depending on
 * whether hw header encapsulation is in use.
 *
 * @key:    hw key protecting the frame, or NULL
 * @beacon: frame goes out on the beacon queue via firmware
 */
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 tx_count = 15;
	u32 val;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	/* pick packet format and hw queue: beacon / ALTX (PSD) / WMM data */
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
			mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;
	txwi[5] = 0;
	txwi[6] = 0;
	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;

	if (is_8023)
		mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7921_mac_write_txwi_80211(dev, txwi, skb, key);

	/* fixed-rate frames (set by the 80211 helper above) get a default
	 * basic rate and BA disabled
	 */
	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
		u16 rate;

		/* hardware won't add HTC for mgmt/ctrl frame */
		txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			rate = MT7921_5G_RATE_DEFAULT;
		else
			rate = MT7921_2G_RATE_DEFAULT;

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
727 
728 static void
729 mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
730 		    void *txp_ptr, u32 id)
731 {
732 	struct mt7921_hw_txp *txp = txp_ptr;
733 	struct mt7921_txp_ptr *ptr = &txp->ptr[0];
734 	int i, nbuf = tx_info->nbuf - 1;
735 
736 	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
737 	tx_info->nbuf = 1;
738 
739 	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
740 
741 	for (i = 0; i < nbuf; i++) {
742 		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
743 		u32 addr = tx_info->buf[i + 1].addr;
744 
745 		if (i == nbuf - 1)
746 			len |= MT_TXD_LEN_LAST;
747 
748 		if (i & 1) {
749 			ptr->buf1 = cpu_to_le32(addr);
750 			ptr->len1 = cpu_to_le16(len);
751 			ptr++;
752 		} else {
753 			ptr->buf0 = cpu_to_le32(addr);
754 			ptr->len0 = cpu_to_le16(len);
755 		}
756 	}
757 }
758 
759 static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked)
760 {
761 	struct mt76_phy *mphy = &dev->mphy;
762 	struct mt76_queue *q;
763 
764 	q = mphy->q_tx[0];
765 	if (blocked == q->blocked)
766 		return;
767 
768 	q->blocked = blocked;
769 	if (!blocked)
770 		mt76_worker_schedule(&dev->mt76.tx_worker);
771 }
772 
/* mt76 ->tx_prepare_skb hook: allocate a tx token for the frame, write the
 * TXD and hw TXP into @txwi_ptr and collapse the fragment list so DMA sees
 * a single buffer.
 *
 * Returns 0 on success, a negative errno on failure (bad length or token
 * pool exhausted).
 */
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
	struct mt76_txwi_cache *t;
	struct mt7921_txp_common *txp;
	int id;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	cb->wcid = wcid->idx;

	/* txwi cache entry lives right behind the hw descriptor */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC);
	if (id >= 0)
		dev->token_count++;

	/* throttle tx before the token pool runs completely dry */
	if (dev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR)
		mt7921_set_tx_blocked(dev, true);
	spin_unlock_bh(&dev->token_lock);

	if (id < 0)
		return id;

	mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      false);

	txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
	memset(txp, 0, sizeof(struct mt7921_txp_common));
	mt7921_write_hw_txp(dev, tx_info, txp, id);

	/* skb ownership moved to the token; signal the core with a dummy */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}
821 
822 static void
823 mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
824 {
825 	struct mt7921_sta *msta;
826 	u16 fc, tid;
827 	u32 val;
828 
829 	if (!sta || !sta->ht_cap.ht_supported)
830 		return;
831 
832 	tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
833 	if (tid >= 6) /* skip VO queue */
834 		return;
835 
836 	val = le32_to_cpu(txwi[2]);
837 	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
838 	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
839 	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
840 		return;
841 
842 	msta = (struct mt7921_sta *)sta->drv_priv;
843 	if (!test_and_set_bit(tid, &msta->ampdu_state))
844 		ieee80211_start_tx_ba_session(sta, tid, 0);
845 }
846 
/* Report tx status for a completed frame to mac80211.
 *
 * @stat: non-zero means the hardware reported a tx failure.
 * @free_list: batched-free list passed through to ieee80211_tx_status_ext().
 */
static void
mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
			  struct ieee80211_sta *sta, u8 stat,
			  struct list_head *free_list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = info,
		.skb = skb,
		.free_list = free_list,
	};
	struct ieee80211_hw *hw;

	if (sta) {
		struct mt7921_sta *msta;

		/* report the last rate the firmware used for this peer */
		msta = (struct mt7921_sta *)sta->drv_priv;
		status.rate = &msta->stats.tx_rate;
	}

	hw = mt76_tx_status_get_hw(mdev, skb);

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		info->flags |= IEEE80211_TX_STAT_AMPDU;

	if (stat)
		ieee80211_tx_info_clear_status(info);

	/* NOTE(review): STAT_ACK is set for every frame that requested an
	 * ack, even when stat indicates failure — confirm this is intended
	 */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.tx_time = 0;
	ieee80211_tx_status_ext(hw, &status);
}
882 
/* Undo the DMA mappings recorded in a frame's hw TXP.  Each txp ptr slot
 * holds up to two (buf, len) pairs; iteration stops at the pair tagged
 * with MT_TXD_LEN_LAST (mirrors mt7921_write_hw_txp()).
 */
void mt7921_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7921_txp_common *txp;
	int i;

	txp = mt7921_txwi_to_txp(dev, t);

	for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
		struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
		bool last;
		u16 len;

		len = le16_to_cpu(ptr->len0);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		len = le16_to_cpu(ptr->len1);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}
913 
/* Handle a "tx free done" event from the hardware: release the tx tokens
 * for all freed MSDUs, unmap their buffers, report tx status, and unblock
 * the tx queues if enough tokens were returned.
 */
void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *tmp;
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
	 * to the time ack is received or dropped by hw (air + hw queue time).
	 * Should avoid accessing WTBL to get Tx airtime, and use it instead.
	 */
	count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt7921_phy *phy;
			struct mt76_wcid *wcid;
			u16 idx;

			/* a wcid-pair entry occupies one info slot, so the
			 * loop bound must grow by one to compensate
			 */
			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			/* sta stays set for the following msdu entries */
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* queue the station for stats/airtime updates */
			msta = container_of(wcid, struct mt7921_sta, wcid);
			phy = msta->vif->phy;
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->stats_list))
				list_add_tail(&msta->stats_list, &phy->stats_list);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		/* return the tx token and check whether tx can be unblocked */
		spin_lock_bh(&dev->token_lock);
		txwi = idr_remove(&dev->token, msdu);
		if (txwi)
			dev->token_count--;
		if (dev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR &&
		    dev->mphy.q_tx[0]->blocked)
			wake = true;
		spin_unlock_bh(&dev->token_lock);

		if (!txwi)
			continue;

		mt7921_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
			void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);

			/* EAPOL frames must not trigger BA session setup */
			if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
				mt7921_tx_check_aggr(sta, txwi_ptr);

			/* balance the non-AQL pending counter, clamping at 0 */
			if (sta && !info->tx_time_est) {
				struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
				int pending;

				pending = atomic_dec_return(&wcid->non_aql_packets);
				if (pending < 0)
					atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
			}

			mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}

	if (wake) {
		spin_lock_bh(&dev->token_lock);
		mt7921_set_tx_blocked(dev, false);
		spin_unlock_bh(&dev->token_lock);
	}

	napi_consume_skb(skb, 1);

	/* free the skbs batched by tx_complete_status() */
	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	/* skip the register-heavy poll while in fw power-save */
	if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
		return;

	mt7921_mac_sta_poll(dev);

	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);

	mt76_worker_schedule(&dev->mt76.tx_worker);
}
1026 
/* mt76 ->complete_skb hook: finish a tx queue entry.  On the DMA error
 * path the skb still lives behind the tx token, so the token is reclaimed
 * here before reporting status.
 */
void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7921_dev *dev;

	/* no txwi: frame was never handed to hw, just drop it */
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	dev = container_of(mdev, struct mt7921_dev, mt76);

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7921_txp_common *txp;
		u16 token;

		txp = mt7921_txwi_to_txp(mdev, e->txwi);

		/* recover the token id stored in the TXP msdu_id */
		token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
		spin_lock_bh(&dev->token_lock);
		t = idr_remove(&dev->token, token);
		spin_unlock_bh(&dev->token_lock);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);

		mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
					  NULL);
	}
}
1063 
/* Reset the hardware tx/rx statistics and restart survey accounting
 * for the (single) phy.
 */
void mt7921_mac_reset_counters(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	int i;

	/* read and discard the aggregation counters to clear them
	 * (presumably clear-on-read, like the airtime counters below)
	 */
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
	}

	dev->mt76.phy.survey_time = ktime_get_boottime();
	/* only the first half of aggr_stats is used by this phy */
	memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR37(0));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
}
1085 
1086 void mt7921_mac_set_timing(struct mt7921_phy *phy)
1087 {
1088 	s16 coverage_class = phy->coverage_class;
1089 	struct mt7921_dev *dev = phy->dev;
1090 	u32 val, reg_offset;
1091 	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
1092 		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
1093 	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
1094 		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
1095 	int sifs, offset;
1096 	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
1097 
1098 	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
1099 		return;
1100 
1101 	if (is_5ghz)
1102 		sifs = 16;
1103 	else
1104 		sifs = 10;
1105 
1106 	mt76_set(dev, MT_ARB_SCR(0),
1107 		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1108 	udelay(1);
1109 
1110 	offset = 3 * coverage_class;
1111 	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
1112 		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
1113 
1114 	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
1115 	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
1116 	mt76_wr(dev, MT_TMAC_ICR0(0),
1117 		FIELD_PREP(MT_IFS_EIFS, 360) |
1118 		FIELD_PREP(MT_IFS_RIFS, 2) |
1119 		FIELD_PREP(MT_IFS_SIFS, sifs) |
1120 		FIELD_PREP(MT_IFS_SLOT, phy->slottime));
1121 
1122 	if (phy->slottime < 20 || is_5ghz)
1123 		val = MT7921_CFEND_RATE_DEFAULT;
1124 	else
1125 		val = MT7921_CFEND_RATE_11B;
1126 
1127 	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
1128 	mt76_clear(dev, MT_ARB_SCR(0),
1129 		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1130 }
1131 
/* Noise-floor readout is not implemented for mt7921: always returns 0,
 * so the noise moving average in the caller is never advanced by it.
 */
static u8
mt7921_phy_get_nf(struct mt7921_phy *phy, int idx)
{
	return 0;
}
1137 
1138 static void
1139 mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
1140 {
1141 	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
1142 	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
1143 	struct mt76_channel_state *state;
1144 	u64 busy_time, tx_time, rx_time, obss_time;
1145 	int nf;
1146 
1147 	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
1148 				   MT_MIB_SDR9_BUSY_MASK);
1149 	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
1150 				 MT_MIB_SDR36_TXTIME_MASK);
1151 	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
1152 				 MT_MIB_SDR37_RXTIME_MASK);
1153 	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
1154 				   MT_MIB_OBSSTIME_MASK);
1155 
1156 	nf = mt7921_phy_get_nf(phy, idx);
1157 	if (!phy->noise)
1158 		phy->noise = nf << 4;
1159 	else if (nf)
1160 		phy->noise += nf - (phy->noise >> 4);
1161 
1162 	state = mphy->chan_state;
1163 	state->cc_busy += busy_time;
1164 	state->cc_tx += tx_time;
1165 	state->cc_rx += rx_time + obss_time;
1166 	state->cc_bss_rx += rx_time;
1167 	state->noise = -(phy->noise >> 4);
1168 }
1169 
/* mt76 survey hook for the primary phy. NOTE(review): assumes a
 * non-zero mt76_connac_pm_wake() return means the device could not be
 * woken, so register access is skipped entirely — confirm against the
 * connac PM helpers.
 */
void mt7921_update_channel(struct mt76_dev *mdev)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);

	if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
		return;

	mt7921_phy_update_channel(&mdev->phy, 0);
	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);

	/* let the PM state machine re-evaluate whether to doze again */
	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
1183 
1184 static bool
1185 mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state)
1186 {
1187 	bool ret;
1188 
1189 	ret = wait_event_timeout(dev->reset_wait,
1190 				 (READ_ONCE(dev->reset_state) & state),
1191 				 MT7921_RESET_TIMEOUT);
1192 
1193 	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1194 	return ret;
1195 }
1196 
/* Quiesce and restart the WFDMA engine: disable DMA, flush every tx/rx
 * queue, then re-enable with fresh prefetch settings. Ordering matters.
 */
static void
mt7921_dma_reset(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	int i;

	/* stop tx/rx DMA before touching the rings */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	/* give in-flight transfers time to drain */
	usleep_range(1000, 2000);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	/* re-init prefetch settings after reset */
	mt7921_dma_prefetch(dev);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
}
1222 
/* Release every outstanding tx token: unmap its DMA buffers, give the
 * pending skb (if any) back to mac80211 and recycle the txwi cache
 * entry. The token IDR is destroyed afterwards; callers re-init it if
 * the device keeps running (see mt7921_mac_reset_work()).
 */
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->token_lock);
	idr_for_each_entry(&dev->token, txwi, id) {
		mt7921_txp_skb_unmap(&dev->mt76, txwi);
		if (txwi->skb) {
			struct ieee80211_hw *hw;

			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
			ieee80211_free_txskb(hw, txwi->skb);
		}
		mt76_put_txwi(&dev->mt76, txwi);
		dev->token_count--;
	}
	spin_unlock_bh(&dev->token_lock);
	idr_destroy(&dev->token);
}
1243 
/* System error recovery: full DMA/MCU reset handshake. Stops all tx
 * scheduling and NAPI, reclaims every tx token, resets WFDMA, then
 * brings everything back up while acking each stage to the MCU.
 */
void mt7921_mac_reset_work(struct work_struct *work)
{
	struct mt7921_dev *dev;

	dev = container_of(work, struct mt7921_dev, reset_work);

	/* only proceed once the MCU has asked us to stop DMA */
	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	/* unblock anyone sleeping on an MCU response */
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.napi[2]);
	napi_disable(&dev->mt76.tx_napi);

	mt7921_mutex_acquire(dev);

	/* ack the stop request, then drop all in-flight tx tokens */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	mt7921_tx_token_put(dev);
	idr_init(&dev->token);

	if (mt7921_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7921_dma_reset(&dev->phy);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7921_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);

	/* restart tx scheduling and all NAPI contexts */
	mt76_worker_enable(&dev->mt76.tx_worker);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	napi_enable(&dev->mt76.napi[2]);
	napi_schedule(&dev->mt76.napi[2]);

	ieee80211_wake_queues(mt76_hw(dev));

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt7921_mutex_release(dev);

	/* resume the periodic mac work cancelled above */
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7921_WATCHDOG_TIME);
}
1310 
/* Refresh the software MIB snapshot from the hardware counters and
 * accumulate the tx aggregation-size histogram into aggr_stats.
 */
static void
mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0 = 0, aggr1;

	memset(mib, 0, sizeof(*mib));

	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
					  MT_MIB_SDR3_FCS_ERR_MASK);

	/* NOTE(review): aggr0 walks indices 0-7 while aggr1 starts at 4
	 * and walks 4-11, so the two histograms overlap in
	 * aggr_stats[4..7]. aggr1 = aggr0 + 8 looks intended — confirm
	 * against the MT_TX_AGG_CNT/CNT2 bucket layout before changing.
	 */
	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
		u32 val, val2;

		val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));

		/* report the maximum across the four counters, not a sum */
		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
		if (val2 > mib->ack_fail_cnt)
			mib->ack_fail_cnt = val2;

		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		if (val2 > mib->ba_miss_cnt)
			mib->ba_miss_cnt = val2;

		/* rts_cnt is sampled from the same register as the
		 * winning rts_retries_cnt so the pair stays consistent
		 */
		val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
		if (val2 > mib->rts_retries_cnt) {
			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt = val2;
		}

		/* each register packs two 16-bit histogram buckets */
		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));

		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;
		dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
	}
}
1352 
/* Drain phy->stats_list and query the wtbl for each queued station.
 * The poll lock is dropped around each query (which goes through the
 * MCU and cannot run atomically), so entries are detached one at a
 * time rather than iterated in place.
 */
static void
mt7921_mac_sta_stats_work(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mt7921_sta *msta;
	LIST_HEAD(list);

	/* steal the whole pending list under the lock */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&phy->stats_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7921_sta, stats_list);
		list_del_init(&msta->stats_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* query wtbl info to report tx rate for further devices */
		mt7921_get_wtbl_info(dev, msta->wcid.idx);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
1376 
1377 void mt7921_mac_work(struct work_struct *work)
1378 {
1379 	struct mt7921_phy *phy;
1380 	struct mt76_phy *mphy;
1381 
1382 	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
1383 					       mac_work.work);
1384 	phy = mphy->priv;
1385 
1386 	if (test_bit(MT76_STATE_PM, &mphy->state))
1387 		goto out;
1388 
1389 	mt7921_mutex_acquire(phy->dev);
1390 
1391 	mt76_update_survey(mphy->dev);
1392 	if (++mphy->mac_work_count == 5) {
1393 		mphy->mac_work_count = 0;
1394 
1395 		mt7921_mac_update_mib_stats(phy);
1396 	}
1397 	if (++phy->sta_work_count == 10) {
1398 		phy->sta_work_count = 0;
1399 		mt7921_mac_sta_stats_work(phy);
1400 	};
1401 
1402 	mt7921_mutex_release(phy->dev);
1403 
1404 out:
1405 	ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
1406 				     MT7921_WATCHDOG_TIME);
1407 }
1408 
1409 void mt7921_pm_wake_work(struct work_struct *work)
1410 {
1411 	struct mt7921_dev *dev;
1412 	struct mt76_phy *mphy;
1413 
1414 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1415 						pm.wake_work);
1416 	mphy = dev->phy.mt76;
1417 
1418 	if (!mt7921_mcu_drv_pmctrl(dev))
1419 		mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
1420 	else
1421 		dev_err(mphy->dev->dev, "failed to wake device\n");
1422 
1423 	ieee80211_wake_queues(mphy->hw);
1424 	complete_all(&dev->pm.wake_cmpl);
1425 }
1426 
1427 void mt7921_pm_power_save_work(struct work_struct *work)
1428 {
1429 	struct mt7921_dev *dev;
1430 	unsigned long delta;
1431 
1432 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1433 						pm.ps_work.work);
1434 
1435 	delta = dev->pm.idle_timeout;
1436 	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
1437 		delta = dev->pm.last_activity + delta - jiffies;
1438 		goto out;
1439 	}
1440 
1441 	if (!mt7921_mcu_fw_pmctrl(dev))
1442 		return;
1443 out:
1444 	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
1445 }
1446 
1447 int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
1448 				 struct ieee80211_vif *vif,
1449 				 bool enable)
1450 {
1451 	struct mt7921_dev *dev = phy->dev;
1452 	bool ext_phy = phy != &dev->phy;
1453 	int err;
1454 
1455 	if (!dev->pm.enable)
1456 		return -EOPNOTSUPP;
1457 
1458 	err = mt7921_mcu_set_bss_pm(dev, vif, enable);
1459 	if (err)
1460 		return err;
1461 
1462 	if (enable) {
1463 		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
1464 		mt76_set(dev, MT_WF_RFCR(ext_phy),
1465 			 MT_WF_RFCR_DROP_OTHER_BEACON);
1466 	} else {
1467 		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
1468 		mt76_clear(dev, MT_WF_RFCR(ext_phy),
1469 			   MT_WF_RFCR_DROP_OTHER_BEACON);
1470 	}
1471 
1472 	return 0;
1473 }
1474