xref: /linux/drivers/net/wireless/mediatek/mt76/mt7925/mac.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2023 MediaTek Inc. */
3 
4 #include <linux/devcoredump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/timekeeping.h>
7 #include "mt7925.h"
8 #include "../dma.h"
9 #include "mac.h"
10 #include "mcu.h"
11 
12 bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask)
13 {
14 	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
15 		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
16 
17 	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
18 			 0, 5000);
19 }
20 
21 static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
22 {
23 	static const u8 ac_to_tid[] = {
24 		[IEEE80211_AC_BE] = 0,
25 		[IEEE80211_AC_BK] = 1,
26 		[IEEE80211_AC_VI] = 4,
27 		[IEEE80211_AC_VO] = 6
28 	};
29 	struct ieee80211_sta *sta;
30 	struct mt792x_sta *msta;
31 	struct mt792x_link_sta *mlink;
32 	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
33 	LIST_HEAD(sta_poll_list);
34 	struct rate_info *rate;
35 	s8 rssi[4];
36 	int i;
37 
38 	spin_lock_bh(&dev->mt76.sta_poll_lock);
39 	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
40 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
41 
42 	while (true) {
43 		bool clear = false;
44 		u32 addr, val;
45 		u16 idx;
46 		u8 bw;
47 
48 		if (list_empty(&sta_poll_list))
49 			break;
50 		mlink = list_first_entry(&sta_poll_list,
51 					 struct mt792x_link_sta, wcid.poll_list);
52 		msta = container_of(mlink, struct mt792x_sta, deflink);
53 		spin_lock_bh(&dev->mt76.sta_poll_lock);
54 		list_del_init(&mlink->wcid.poll_list);
55 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
56 
57 		idx = mlink->wcid.idx;
58 		addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET);
59 
60 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
61 			u32 tx_last = mlink->airtime_ac[i];
62 			u32 rx_last = mlink->airtime_ac[i + 4];
63 
64 			mlink->airtime_ac[i] = mt76_rr(dev, addr);
65 			mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
66 
67 			tx_time[i] = mlink->airtime_ac[i] - tx_last;
68 			rx_time[i] = mlink->airtime_ac[i + 4] - rx_last;
69 
70 			if ((tx_last | rx_last) & BIT(30))
71 				clear = true;
72 
73 			addr += 8;
74 		}
75 
76 		if (clear) {
77 			mt7925_mac_wtbl_update(dev, idx,
78 					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
79 			memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
80 		}
81 
82 		if (!mlink->wcid.sta)
83 			continue;
84 
85 		sta = container_of((void *)msta, struct ieee80211_sta,
86 				   drv_priv);
87 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
88 			u8 q = mt76_connac_lmac_mapping(i);
89 			u32 tx_cur = tx_time[q];
90 			u32 rx_cur = rx_time[q];
91 			u8 tid = ac_to_tid[i];
92 
93 			if (!tx_cur && !rx_cur)
94 				continue;
95 
96 			ieee80211_sta_register_airtime(sta, tid, tx_cur,
97 						       rx_cur);
98 		}
99 
100 		/* We don't support reading GI info from txs packets.
101 		 * For accurate tx status reporting and AQL improvement,
102 		 * we need to make sure that flags match so polling GI
103 		 * from per-sta counters directly.
104 		 */
105 		rate = &mlink->wcid.rate;
106 
107 		switch (rate->bw) {
108 		case RATE_INFO_BW_160:
109 			bw = IEEE80211_STA_RX_BW_160;
110 			break;
111 		case RATE_INFO_BW_80:
112 			bw = IEEE80211_STA_RX_BW_80;
113 			break;
114 		case RATE_INFO_BW_40:
115 			bw = IEEE80211_STA_RX_BW_40;
116 			break;
117 		default:
118 			bw = IEEE80211_STA_RX_BW_20;
119 			break;
120 		}
121 
122 		addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 6);
123 		val = mt76_rr(dev, addr);
124 		if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
125 			addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 5);
126 			val = mt76_rr(dev, addr);
127 			rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
128 		} else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
129 			u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw;
130 
131 			rate->he_gi = (val & (0x3 << offs)) >> offs;
132 		} else if (rate->flags &
133 			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
134 			if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw))
135 				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
136 			else
137 				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
138 		}
139 
140 		/* get signal strength of resp frames (CTS/BA/ACK) */
141 		addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 34);
142 		val = mt76_rr(dev, addr);
143 
144 		rssi[0] = to_rssi(GENMASK(7, 0), val);
145 		rssi[1] = to_rssi(GENMASK(15, 8), val);
146 		rssi[2] = to_rssi(GENMASK(23, 16), val);
147 		rssi[3] = to_rssi(GENMASK(31, 14), val);
148 
149 		mlink->ack_signal =
150 			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
151 
152 		ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal);
153 	}
154 }
155 
156 void mt7925_mac_set_fixed_rate_table(struct mt792x_dev *dev,
157 				     u8 tbl_idx, u16 rate_idx)
158 {
159 	u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx;
160 
161 	mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
162 	/* use wtbl spe idx */
163 	mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
164 	mt76_wr(dev, MT_WTBL_ITCR, ctrl);
165 }
166 
167 /* The HW does not translate the mac header to 802.3 for mesh point */
/* The HW does not translate the mac header to 802.3 for mesh point */
/* Undo the hardware 802.11 -> 802.3 header translation for the first
 * fragment of a fragmented frame: rebuild an 802.11 header (plus QoS /
 * HT control fields when present) from the RXD words and the ethernet
 * header the HW produced, so mac80211 can reassemble the fragments.
 *
 * @hdr_gap: offset from skb->data to the translated ethernet header.
 * Returns 0 on success, -EINVAL if the frame is not a unicast frame
 * with RXD group 4 present or no station context is available.
 */
static int mt7925_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt792x_sta *msta = (struct mt792x_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	/* only unicast-to-me frames carry the fields we need */
	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	/* RXD group 4 holds the original frame control / seq ctrl */
	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	/* addr3/addr4 depend on the DS bits of the original frame */
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		break;
	}

	/* strip RXD + ethernet header, then re-add an LLC/SNAP prefix
	 * (or none) depending on the ethertype, mirroring the 802.3 ->
	 * 802.11 encapsulation rules
	 */
	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	/* re-insert optional HT control and QoS control fields */
	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	/* 3-address headers omit addr4 (the last 6 bytes of struct hdr) */
	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
246 
247 static int
248 mt7925_mac_fill_rx_rate(struct mt792x_dev *dev,
249 			struct mt76_rx_status *status,
250 			struct ieee80211_supported_band *sband,
251 			__le32 *rxv, u8 *mode)
252 {
253 	u32 v0, v2;
254 	u8 stbc, gi, bw, dcm, nss;
255 	int i, idx;
256 	bool cck = false;
257 
258 	v0 = le32_to_cpu(rxv[0]);
259 	v2 = le32_to_cpu(rxv[2]);
260 
261 	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
262 	i = idx;
263 	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
264 
265 	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
266 	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
267 	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
268 	dcm = FIELD_GET(MT_PRXV_DCM, v2);
269 	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);
270 
271 	switch (*mode) {
272 	case MT_PHY_TYPE_CCK:
273 		cck = true;
274 		fallthrough;
275 	case MT_PHY_TYPE_OFDM:
276 		i = mt76_get_rate(&dev->mt76, sband, i, cck);
277 		break;
278 	case MT_PHY_TYPE_HT_GF:
279 	case MT_PHY_TYPE_HT:
280 		status->encoding = RX_ENC_HT;
281 		if (gi)
282 			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
283 		if (i > 31)
284 			return -EINVAL;
285 		break;
286 	case MT_PHY_TYPE_VHT:
287 		status->nss = nss;
288 		status->encoding = RX_ENC_VHT;
289 		if (gi)
290 			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
291 		if (i > 11)
292 			return -EINVAL;
293 		break;
294 	case MT_PHY_TYPE_HE_MU:
295 	case MT_PHY_TYPE_HE_SU:
296 	case MT_PHY_TYPE_HE_EXT_SU:
297 	case MT_PHY_TYPE_HE_TB:
298 		status->nss = nss;
299 		status->encoding = RX_ENC_HE;
300 		i &= GENMASK(3, 0);
301 
302 		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
303 			status->he_gi = gi;
304 
305 		status->he_dcm = dcm;
306 		break;
307 	case MT_PHY_TYPE_EHT_SU:
308 	case MT_PHY_TYPE_EHT_TRIG:
309 	case MT_PHY_TYPE_EHT_MU:
310 		status->nss = nss;
311 		status->encoding = RX_ENC_EHT;
312 		i &= GENMASK(3, 0);
313 
314 		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
315 			status->eht.gi = gi;
316 		break;
317 	default:
318 		return -EINVAL;
319 	}
320 	status->rate_idx = i;
321 
322 	switch (bw) {
323 	case IEEE80211_STA_RX_BW_20:
324 		break;
325 	case IEEE80211_STA_RX_BW_40:
326 		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
327 		    (idx & MT_PRXV_TX_ER_SU_106T)) {
328 			status->bw = RATE_INFO_BW_HE_RU;
329 			status->he_ru =
330 				NL80211_RATE_INFO_HE_RU_ALLOC_106;
331 		} else {
332 			status->bw = RATE_INFO_BW_40;
333 		}
334 		break;
335 	case IEEE80211_STA_RX_BW_80:
336 		status->bw = RATE_INFO_BW_80;
337 		break;
338 	case IEEE80211_STA_RX_BW_160:
339 		status->bw = RATE_INFO_BW_160;
340 		break;
341 	default:
342 		return -EINVAL;
343 	}
344 
345 	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
346 	if (*mode < MT_PHY_TYPE_HE_SU && gi)
347 		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
348 
349 	return 0;
350 }
351 
/* Parse one RX descriptor and fill the mt76_rx_status in skb->cb:
 * walks the optional RXD groups (4, 1, 2, 3, 5), extracts crypto / AMSDU /
 * timestamp / rate info, strips or reconstructs headers and attaches
 * radiotap HE/EHT data.
 *
 * Returns 0 on success, -EINVAL when the descriptor is malformed or the
 * frame must be dropped.
 */
static int
mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
{
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	bool hdr_trans, unicast, insert_ccmp_hdr = false;
	u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
	u16 hdr_gap;
	__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt792x_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	/* csum_status must be read before status (same skb->cb bytes)
	 * is zeroed below
	 */
	u32 csum_status = *(u32 *)skb->cb;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	struct mt792x_link_sta *mlink;
	u8 mode = 0; /* , band_idx; */
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	/* header-translated frames with a CCMP MIC error are dropped */
	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt792x_rx_get_wcid(dev, idx, unicast);

	/* queue the station for the next airtime/rate poll */
	if (status->wcid) {
		mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&mlink->wcid.poll_list))
			list_add_tail(&mlink->wcid.poll_list,
				      &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	mt792x_get_status_freq_info(status, chfreq);

	switch (status->band) {
	case NL80211_BAND_5GHZ:
		sband = &mphy->sband_5g.sband;
		break;
	case NL80211_BAND_6GHZ:
		sband = &mphy->sband_6g.sband;
		break;
	default:
		sband = &mphy->sband_2g.sband;
		break;
	}

	if (!sband->channels)
		return -EINVAL;

	/* HW checksum offload result is only trustworthy on MMIO parts */
	if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* skip the 8-dword base descriptor; each optional group that
	 * follows advances rxd and is bounds-checked against skb->len
	 */
	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		/* TODO: need to map rxd address */
		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				/* IV bytes are stored reversed in the RXD */
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				/* ampdu_ref 0 is reserved: skip it on wrap */
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7925_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		/* fragments must be reassembled as 802.11 frames */
		if (mt7925_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
	}

	mt792x_mac_assoc_rssi(dev, skb);

	/* radiotap HE/EHT info only applies to native 802.11 frames */
	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
	status->qos_ctl = qos_ctl;

	return 0;
}
624 
625 static void
626 mt7925_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
627 			   struct mt76_wcid *wcid)
628 {
629 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
630 	u8 fc_type, fc_stype;
631 	u16 ethertype;
632 	bool wmm = false;
633 	u32 val;
634 
635 	if (wcid->sta) {
636 		struct ieee80211_sta *sta;
637 
638 		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
639 		wmm = sta->wme;
640 	}
641 
642 	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
643 	      FIELD_PREP(MT_TXD1_TID, tid);
644 
645 	ethertype = get_unaligned_be16(&skb->data[12]);
646 	if (ethertype >= ETH_P_802_3_MIN)
647 		val |= MT_TXD1_ETH_802_3;
648 
649 	txwi[1] |= cpu_to_le32(val);
650 
651 	fc_type = IEEE80211_FTYPE_DATA >> 2;
652 	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
653 
654 	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
655 	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
656 
657 	txwi[2] |= cpu_to_le32(val);
658 }
659 
/* Fill the native-802.11 specific TXD fields: header format/length,
 * TID (with special values for ADDBA and management frames), fixed-rate
 * and BIP flags, frame type/subtype, and sequence-number overrides for
 * injected frames.
 */
static void
mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* ADDBA requests and other management frames use reserved TIDs */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	/* header length is stored in units of 2 bytes */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	/* BIP-protected group management frames: HW adds the MMIE, so
	 * the generic protect-frame bit must be cleared
	 */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc))
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);

	/* injected frames keep their caller-supplied sequence number
	 * (for BAR frames, the start sequence number from the BAR body)
	 */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		/* no HW AMSDU aggregation for injected frames */
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}
726 
/* Build the full TX descriptor (TXD/txwi) for a frame.
 *
 * Fills the common TXD words (queue, wlan index, packet format,
 * retry/protection flags, PID-based TX status request, MLD address
 * translation), then delegates the encapsulation-specific fields to the
 * 802.3 or 802.11 helper, and finally applies a fixed-rate index for
 * frames flagged MT_TXD1_FIXED_RATE.
 *
 * @pid: packet ID; values >= MT_PACKET_ID_FIRST request host TX status.
 * @changed: BSS_CHANGED_* bits used to detect beacon/in-band discovery
 *           frames.
 */
void
mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_key_conf *key, int pid,
		      enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
	u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif *mvif;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED));
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	struct mt792x_bss_conf *mconf;

	mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv,
					 wcid->link_id) : NULL;
	mvif = mconf ? (struct mt76_vif *)&mconf->mt76 : NULL;

	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	/* pick packet format and LMAC queue by frame class */
	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));

		/* counting non-offloading skbs */
		wcid->stats.tx_bytes += skb->len;
		wcid->stats.tx_packets++;
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15);

	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	/* PIDs >= MT_PACKET_ID_FIRST request a host TX status report;
	 * BA and HW-AMSDU are disabled so the status maps 1:1 to the skb
	 */
	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST) {
		val |= MT_TXD5_TX_STATUS_HOST;
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	txwi[5] = cpu_to_le32(val);

	/* disable MLD address translation for non-MLD vifs and for
	 * frames on the management/beacon queues
	 * NOTE(review): vif may be NULL here (mvif NULL path above);
	 * confirm ieee80211_vif_is_mld() is safe for a NULL vif.
	 */
	val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	if (!ieee80211_vif_is_mld(vif) ||
	    (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0))
		val |= MT_TXD6_DIS_MAT;
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7925_mac_write_txwi_8023(txwi, skb, wcid);
	else
		mt7925_mac_write_txwi_80211(dev, txwi, skb, key);

	/* fixed-rate frames: pick mcast/beacon/basic rate table index */
	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT792x_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi);
841 
842 static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb,
843 				 struct mt76_wcid *wcid)
844 {
845 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
846 	struct ieee80211_link_sta *link_sta;
847 	struct mt792x_link_sta *mlink;
848 	struct mt792x_sta *msta;
849 	bool is_8023;
850 	u16 fc, tid;
851 
852 	link_sta = rcu_dereference(sta->link[wcid->link_id]);
853 	if (!link_sta)
854 		return;
855 
856 	if (!sta || !(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
857 		return;
858 
859 	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
860 	is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
861 
862 	if (is_8023) {
863 		fc = IEEE80211_FTYPE_DATA |
864 		     (sta->wme ? IEEE80211_STYPE_QOS_DATA :
865 		      IEEE80211_STYPE_DATA);
866 	} else {
867 		/* No need to get precise TID for Action/Management Frame,
868 		 * since it will not meet the following Frame Control
869 		 * condition anyway.
870 		 */
871 
872 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
873 
874 		fc = le16_to_cpu(hdr->frame_control) &
875 		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
876 	}
877 
878 	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
879 		return;
880 
881 	msta = (struct mt792x_sta *)sta->drv_priv;
882 
883 	if (sta->mlo && msta->deflink_id != IEEE80211_LINK_UNSPECIFIED)
884 		mlink = rcu_dereference(msta->link[msta->deflink_id]);
885 	else
886 		mlink = &msta->deflink;
887 
888 	if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state))
889 		ieee80211_start_tx_ba_session(sta, tid, 0);
890 }
891 
/* Consume one TXS (TX status) event: look up the matching skb by
 * (wcid, pid), report ACK status to mac80211 and decode the TX rate
 * into wcid->rate and the per-station counters.
 *
 * Returns true if a matching skb was found and completed.
 */
static bool
mt7925_mac_add_txs_skb(struct mt792x_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	/* rate is reported via wcid->rate, not the legacy rates array */
	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	/* STBC doubles the reported NSS; undo for the effective value */
	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		/* legacy: translate HW rate index via the active band */
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		/* GI is not in the TXS; reuse the value polled from WTBL */
		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	/* complete the skb even when the rate could not be decoded */
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
1025 
1026 void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
1027 {
1028 	struct mt792x_link_sta *mlink = NULL;
1029 	struct mt76_wcid *wcid;
1030 	__le32 *txs_data = data;
1031 	u16 wcidx;
1032 	u8 pid;
1033 
1034 	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
1035 		return;
1036 
1037 	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1038 	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1039 
1040 	if (pid < MT_PACKET_ID_FIRST)
1041 		return;
1042 
1043 	if (wcidx >= MT792x_WTBL_SIZE)
1044 		return;
1045 
1046 	rcu_read_lock();
1047 
1048 	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1049 	if (!wcid)
1050 		goto out;
1051 
1052 	mlink = container_of(wcid, struct mt792x_link_sta, wcid);
1053 
1054 	mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data);
1055 	if (!wcid->sta)
1056 		goto out;
1057 
1058 	spin_lock_bh(&dev->mt76.sta_poll_lock);
1059 	if (list_empty(&mlink->wcid.poll_list))
1060 		list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
1061 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
1062 
1063 out:
1064 	rcu_read_unlock();
1065 }
1066 
/* Release a txwi cache entry once the hardware has finished with its frame.
 *
 * Unmaps the DMA buffers attached to @t, completes the owning skb (running
 * the aggregation check for non-EAPOL data frames when a station context is
 * known) and returns the txwi entry to the free pool.
 *
 * @dev: driver device
 * @t: txwi cache entry being reclaimed
 * @sta: station the frame was addressed to, or NULL if unknown
 * @wcid: WLAN table entry for @sta; only dereferenced when @sta is set
 * @free_list: list the completed skb is queued on for deferred freeing
 */
void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
		      struct ieee80211_sta *sta, struct mt76_wcid *wcid,
		      struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		/* EAPOL frames are excluded from aggregation bookkeeping */
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7925_tx_check_aggr(sta, t->skb, wcid);

		wcid_idx = wcid->idx;
	} else {
		/* no station context: recover the wcid index from the txwi */
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
EXPORT_SYMBOL_GPL(mt7925_txwi_free);
1095 
/* Parse a v4+ "tx done" event and reclaim all completed MSDUs.
 *
 * The event body is a sequence of dwords: a PAIR dword selects the
 * wcid/station the following entries belong to, a HEADER dword carries
 * retry/failure statistics for that wcid, and every other dword packs up
 * to two released MSDU token ids. Each token is returned to the token
 * pool and its skb completed via mt7925_txwi_free().
 */
static void
mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* only event format version >= 4 is understood below */
	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		/* never read past the event even if the msdu count is bogus */
		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt792x_link_sta *mlink;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* queue the station for the WTBL airtime/rate poll */
			mlink = container_of(wcid, struct mt792x_link_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&mlink->wcid.poll_list))
				list_add_tail(&mlink->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER) {
			/* per-wcid stats; COUNT includes the first attempt */
			if (wcid) {
				wcid->stats.tx_retries +=
					FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
				wcid->stats.tx_failed +=
					!!FIELD_GET(MT_TXFREE_INFO_STAT, info);
			}
			continue;
		}

		/* two 15-bit token slots per dword; all-ones means unused */
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7925_txwi_free(dev, txwi, sta, wcid, &free_list);
		}
	}

	mt7925_mac_sta_poll(dev);

	/* unblock tx if releasing tokens cleared the token shortage */
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
1183 
/* Early classifier for a received DMA buffer.
 *
 * Tx-free notifications and tx-status records are consumed here; returns
 * false for those, true for anything that still needs the normal rx path.
 */
bool mt7925_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* firmware-tagged 802.11 frames go through the rx path */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
		mt7925_mac_tx_free(dev, data, len); /* mmio */
		return false;
	case PKT_TYPE_TXS:
		/* 12-dword status records follow a 4-dword header */
		for (rxd += 4; rxd + 12 <= end; rxd += 12)
			mt7925_mac_add_txs(dev, rxd);
		return false;
	default:
		return true;
	}
}
EXPORT_SYMBOL_GPL(mt7925_rx_check);
1214 
/* mt76 rx dispatch callback: route one received buffer by packet type.
 *
 * Consumes @skb in every case except when it is handed to mac80211 via
 * mt76_rx().
 */
void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* firmware-tagged 802.11 frames take the normal rx path */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	/* PKT_FLAG 0x1 reroutes an RX_EVENT to the normal-MCU rx path */
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
		mt7925_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		/* ownership of @skb passes to the MCU event handler */
		mt7925_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		/* 8-dword status records follow a 2-dword header */
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7925_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7925_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7925_queue_rx_skb);
1264 
/* Interface iterator used after a full chip reset to rebuild firmware state.
 *
 * Station interfaces are forced to disconnect so mac80211 reconnects; every
 * valid (MLD) link gets its device context and tx parameters re-pushed; AP
 * interfaces additionally re-create the BSS, station record and beacon
 * offload.
 */
static void
mt7925_vif_connect_iter(void *priv, u8 *mac,
			struct ieee80211_vif *vif)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	/* non-MLD interfaces only carry the default link (bit 0) */
	unsigned long valid = ieee80211_vif_is_mld(vif) ?
			      mvif->valid_links : BIT(0);
	struct mt792x_dev *dev = mvif->phy->dev;
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct ieee80211_bss_conf *bss_conf;
	int i;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_disconnect(vif, true);

	for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
		bss_conf = mt792x_vif_to_bss_conf(vif, i);

		mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf,
					    &mvif->sta.deflink.wcid, true);
		mt7925_mcu_set_tx(dev, bss_conf);
	}

	if (vif->type == NL80211_IFTYPE_AP) {
		mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid,
					    true, NULL);
		mt7925_mcu_sta_update(dev, NULL, vif, true,
				      MT76_STA_INFO_STATE_NONE);
		mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true);
	}
}
1296 
/* system error recovery */
/* Full-chip recovery worker: quiesce mac80211 queues and pending PM work,
 * retry the low-level device reset up to 10 times, then restore interface
 * state and let mac80211 reconnect via mt7925_vif_connect_iter().
 */
void mt7925_mac_reset_work(struct work_struct *work)
{
	struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
					      reset_work);
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt76_connac_pm *pm = &dev->pm;
	int i, ret;

	dev_dbg(dev->mt76.dev, "chip reset\n");
	dev->hw_full_reset = true;
	ieee80211_stop_queues(hw);

	/* make sure no periodic or power-save work races with the reset */
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);

	for (i = 0; i < 10; i++) {
		mutex_lock(&dev->mt76.mutex);
		ret = mt792x_dev_reset(dev);
		mutex_unlock(&dev->mt76.mutex);

		if (!ret)
			break;
	}

	/* i == 10 means every retry failed; log it but restore state anyway */
	if (i == 10)
		dev_err(dev->mt76.dev, "chip reset failed\n");

	/* a reset aborts any in-flight hw scan; report that to cfg80211 */
	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(dev->mphy.hw, &info);
	}

	dev->hw_full_reset = false;
	pm->suspended = false;
	ieee80211_wake_queues(hw);
	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7925_vif_connect_iter, NULL);
	mt76_connac_power_save_sched(&dev->mt76.phy, pm);
}
1342 
/* Delayed worker assembling firmware coredump fragments.
 *
 * Waits until the firmware has been quiet for the coredump timeout, then
 * concatenates the queued MCU fragments into one vzalloc'd buffer, hands
 * it to the devcoredump framework (which takes ownership of the buffer)
 * and finally resets the chip.
 */
void mt7925_coredump_work(struct work_struct *work)
{
	struct mt792x_dev *dev;
	char *dump, *data;

	dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
						coredump.work.work);

	/* fragments still arriving: re-arm and check again later */
	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	/* drain the fragment list even if the allocation failed */
	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		/* strip the MCU rx descriptor plus 8 bytes of header */
		skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 8);
		/* drop fragments that would overflow the dump buffer */
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);

	mt792x_reset(&dev->mt76);
}
1389 
/* usb_sdio */
/* Fill the SDIO/USB txwi located in the headroom in front of @skb->data.
 * The descriptor is written and populated before skb_push() so
 * mt7925_mac_write_txwi() still operates on the payload-only skb.
 */
static void
mt7925_usb_sdio_write_txwi(struct mt792x_dev *dev, struct mt76_wcid *wcid,
			   enum mt76_txq_id qid, struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *key, int pid,
			   struct sk_buff *skb)
{
	__le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);

	memset(txwi, 0, MT_SDIO_TXD_SIZE);
	mt7925_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
	skb_push(skb, MT_SDIO_TXD_SIZE);
}
1403 
/* mt76 hook preparing one skb for transmission over USB/SDIO.
 *
 * Allocates a tx-status packet id, writes the txwi and the bus header,
 * then pads the frame to the bus alignment. Returns 0 on success or a
 * negative errno; the allocated packet id is released on padding failure.
 */
int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
				   enum mt76_txq_id qid, struct mt76_wcid *wcid,
				   struct ieee80211_sta *sta,
				   struct mt76_tx_info *tx_info)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct sk_buff *skb = tx_info->skb;
	int err, pad, pktid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;

		/* request a tx status at most every HZ/4 per station */
		if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->deflink.last_txs = jiffies;
		}
	}

	pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
	mt7925_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);

	mt792x_skb_add_usb_sdio_hdr(dev, skb, 0);
	/* pad to 4-byte alignment; USB needs 4 extra trailing bytes */
	pad = round_up(skb->len, 4) - skb->len;
	if (mt76_is_usb(mdev))
		pad += 4;

	err = mt76_skb_adjust_pad(skb, pad);
	if (err)
		/* Release pktid in case of error. */
		idr_remove(&wcid->pktid, pktid);

	return err;
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_prepare_skb);
1446 
/* mt76 completion hook for USB/SDIO tx buffers.
 *
 * Looks the wcid back up from the txwi written at tx time, runs the
 * aggregation check for non-EAPOL frames, strips the bus + txwi headroom
 * and completes the skb.
 * NOTE(review): the wcid table lookup assumes the completion path runs
 * under rcu_read_lock() — confirm against the mt76 usb/sdio callers.
 */
void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
				     struct mt76_queue_entry *e)
{
	__le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
	unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
	struct ieee80211_sta *sta;
	struct mt76_wcid *wcid;
	u16 idx;

	idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	wcid = rcu_dereference(mdev->wcid[idx]);
	sta = wcid_to_sta(wcid);

	/* EAPOL frames are excluded from aggregation bookkeeping */
	if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
		mt76_connac2_tx_check_aggr(sta, txwi);

	skb_pull(e->skb, headroom);
	mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_complete_skb);
1467 
/* mt76 tx-status callback for USB/SDIO buses: piggybacks a WTBL station
 * poll. Always returns false: no further status data is pending here.
 */
bool mt7925_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);

	mt792x_mutex_acquire(dev);
	mt7925_mac_sta_poll(dev);
	mt792x_mutex_release(dev);

	return false;
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_status_data);
1479 
1480 #if IS_ENABLED(CONFIG_IPV6)
1481 void mt7925_set_ipv6_ns_work(struct work_struct *work)
1482 {
1483 	struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
1484 						ipv6_ns_work);
1485 	struct sk_buff *skb;
1486 	int ret = 0;
1487 
1488 	do {
1489 		skb = skb_dequeue(&dev->ipv6_ns_list);
1490 
1491 		if (!skb)
1492 			break;
1493 
1494 		mt792x_mutex_acquire(dev);
1495 		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
1496 					    MCU_UNI_CMD(OFFLOAD), true);
1497 		mt792x_mutex_release(dev);
1498 
1499 	} while (!ret);
1500 
1501 	if (ret)
1502 		skb_queue_purge(&dev->ipv6_ns_list);
1503 }
1504 #endif
1505