xref: /linux/drivers/net/wireless/mediatek/mt76/mt7921/mac.c (revision 78562b2cafc61a0c08dc949eacb942ac756aae37)
1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
3 
4 #include <linux/devcoredump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/timekeeping.h>
7 #include "mt7921.h"
8 #include "../dma.h"
9 #include "../mt76_connac2_mac.h"
10 #include "mcu.h"
11 
12 #define MT_WTBL_TXRX_CAP_RATE_OFFSET	7
13 #define MT_WTBL_TXRX_RATE_G2_HE		24
14 #define MT_WTBL_TXRX_RATE_G2		12
15 
16 #define MT_WTBL_AC0_CTT_OFFSET		20
17 
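/* Return the LMAC WTBL register address of entry @idx at 32-bit word
 * offset @offset.
 */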
18 static u32 mt7921_mac_wtbl_lmac_addr(int idx, u8 offset)
19 {
20 	return MT_WTBL_LMAC_OFFS(idx, 0) + offset * 4;
21 }
22 
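/* Look up the wcid a received frame belongs to. Non-unicast frames from a
 * known station are attributed to the per-vif (group) wcid instead.
 */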
23 static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
24 					    u16 idx, bool unicast)
25 {
26 	struct mt792x_sta *sta;
27 	struct mt76_wcid *wcid;
28 
29 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
30 		return NULL;
31 
32 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
33 	if (unicast || !wcid)
34 		return wcid;
35 
36 	if (!wcid->sta)
37 		return NULL;
38 
39 	sta = container_of(wcid, struct mt792x_sta, wcid);
40 	if (!sta->vif)
41 		return NULL;
42 
43 	return &sta->vif->sta.wcid;
44 }
45 
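/* Trigger a WTBL update for entry @idx and wait for the busy flag to clear. */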
46 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
47 {
48 	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
49 		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
50 
51 	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
52 			 0, 5000);
53 }
54 
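/* Walk the station poll list: read the per-AC airtime counters from the
 * WTBL, report them to mac80211 and refresh the cached guard interval and
 * ack signal strength of each station.
 */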
55 static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
56 {
57 	static const u8 ac_to_tid[] = {
58 		[IEEE80211_AC_BE] = 0,
59 		[IEEE80211_AC_BK] = 1,
60 		[IEEE80211_AC_VI] = 4,
61 		[IEEE80211_AC_VO] = 6
62 	};
63 	struct ieee80211_sta *sta;
64 	struct mt792x_sta *msta;
65 	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
66 	LIST_HEAD(sta_poll_list);
67 	struct rate_info *rate;
68 	s8 rssi[4];
69 	int i;
70 
71 	spin_lock_bh(&dev->mt76.sta_poll_lock);
72 	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
73 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
74 
75 	while (true) {
76 		bool clear = false;
77 		u32 addr, val;
78 		u16 idx;
79 		u8 bw;
80 
81 		spin_lock_bh(&dev->mt76.sta_poll_lock);
82 		if (list_empty(&sta_poll_list)) {
83 			spin_unlock_bh(&dev->mt76.sta_poll_lock);
84 			break;
85 		}
86 		msta = list_first_entry(&sta_poll_list,
87 					struct mt792x_sta, wcid.poll_list);
88 		list_del_init(&msta->wcid.poll_list);
89 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
90 
91 		idx = msta->wcid.idx;
92 		addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET);
93 
94 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
95 			u32 tx_last = msta->airtime_ac[i];
96 			u32 rx_last = msta->airtime_ac[i + 4];
97 
98 			msta->airtime_ac[i] = mt76_rr(dev, addr);
99 			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
100 
101 			tx_time[i] = msta->airtime_ac[i] - tx_last;
102 			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
103 
104 			if ((tx_last | rx_last) & BIT(30))
105 				clear = true;
106 
107 			addr += 8;
108 		}
109 
110 		if (clear) {
111 			mt7921_mac_wtbl_update(dev, idx,
112 					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
113 			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
114 		}
115 
116 		if (!msta->wcid.sta)
117 			continue;
118 
119 		sta = container_of((void *)msta, struct ieee80211_sta,
120 				   drv_priv);
121 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
122 			u8 q = mt76_connac_lmac_mapping(i);
123 			u32 tx_cur = tx_time[q];
124 			u32 rx_cur = rx_time[q];
125 			u8 tid = ac_to_tid[i];
126 
127 			if (!tx_cur && !rx_cur)
128 				continue;
129 
130 			ieee80211_sta_register_airtime(sta, tid, tx_cur,
131 						       rx_cur);
132 		}
133 
134 		/* We don't support reading GI info from txs packets.
135 		 * For accurate tx status reporting and AQL improvement,
136 		 * we need to make sure the rate flags match, so poll the GI
137 		 * from the per-sta WTBL counters directly.
138 		 */
139 		rate = &msta->wcid.rate;
140 		addr = mt7921_mac_wtbl_lmac_addr(idx,
141 						 MT_WTBL_TXRX_CAP_RATE_OFFSET);
142 		val = mt76_rr(dev, addr);
143 
144 		switch (rate->bw) {
145 		case RATE_INFO_BW_160:
146 			bw = IEEE80211_STA_RX_BW_160;
147 			break;
148 		case RATE_INFO_BW_80:
149 			bw = IEEE80211_STA_RX_BW_80;
150 			break;
151 		case RATE_INFO_BW_40:
152 			bw = IEEE80211_STA_RX_BW_40;
153 			break;
154 		default:
155 			bw = IEEE80211_STA_RX_BW_20;
156 			break;
157 		}
158 
159 		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
160 			u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw;
161 
162 			rate->he_gi = (val & (0x3 << offs)) >> offs;
163 		} else if (rate->flags &
164 			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
165 			if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw))
166 				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
167 			else
168 				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
169 		}
170 
171 		/* get signal strength of resp frames (CTS/BA/ACK) */
172 		addr = mt7921_mac_wtbl_lmac_addr(idx, 30);
173 		val = mt76_rr(dev, addr);
174 
175 		rssi[0] = to_rssi(GENMASK(7, 0), val);
176 		rssi[1] = to_rssi(GENMASK(15, 8), val);
177 		rssi[2] = to_rssi(GENMASK(23, 16), val);
178 		rssi[3] = to_rssi(GENMASK(31, 24), val);
179 
180 		msta->ack_signal =
181 			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
182 
183 		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
184 	}
185 }
186 
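/* Translate the RXD channel number into band and center frequency, e.g.
 * chfreq 181 maps to 6 GHz channel 1 (5955 MHz).
 */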
187 static void
188 mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
189 			    struct mt76_rx_status *status, u8 chfreq)
190 {
191 	if (chfreq > 180) {
192 		status->band = NL80211_BAND_6GHZ;
193 		chfreq = (chfreq - 181) * 4 + 1;
194 	} else if (chfreq > 14) {
195 		status->band = NL80211_BAND_5GHZ;
196 	} else {
197 		status->band = NL80211_BAND_2GHZ;
198 	}
199 	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
200 }
201 
202 static void
203 mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
204 {
205 	struct sk_buff *skb = priv;
206 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
207 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
208 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
209 
210 	if (status->signal > 0)
211 		return;
212 
213 	if (!ether_addr_equal(vif->addr, hdr->addr1))
214 		return;
215 
216 	ewma_rssi_add(&mvif->rssi, -status->signal);
217 }
218 
219 static void
220 mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
221 {
222 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
223 
224 	if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
225 	    !ieee80211_is_auth(hdr->frame_control))
226 		return;
227 
228 	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
229 		IEEE80211_IFACE_ITER_RESUME_ALL,
230 		mt7921_mac_rssi_iter, skb);
231 }
232 
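/* Parse the receive descriptor (RXD) and its optional groups into
 * struct mt76_rx_status and strip the descriptor from the skb. Returns
 * 0 on success or a negative error for malformed frames.
 */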
233 static int
234 mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
235 {
236 	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
237 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
238 	bool hdr_trans, unicast, insert_ccmp_hdr = false;
239 	u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
240 	u16 hdr_gap;
241 	__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
242 	struct mt76_phy *mphy = &dev->mt76.phy;
243 	struct mt792x_phy *phy = &dev->phy;
244 	struct ieee80211_supported_band *sband;
245 	u32 csum_status = *(u32 *)skb->cb;
246 	u32 rxd0 = le32_to_cpu(rxd[0]);
247 	u32 rxd1 = le32_to_cpu(rxd[1]);
248 	u32 rxd2 = le32_to_cpu(rxd[2]);
249 	u32 rxd3 = le32_to_cpu(rxd[3]);
250 	u32 rxd4 = le32_to_cpu(rxd[4]);
251 	struct mt792x_sta *msta = NULL;
252 	u16 seq_ctrl = 0;
253 	__le16 fc = 0;
254 	u8 mode = 0;
255 	int i, idx;
256 
257 	memset(status, 0, sizeof(*status));
258 
259 	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
260 		return -EINVAL;
261 
262 	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
263 		return -EINVAL;
264 
265 	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
266 		return -EINVAL;
267 
268 	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
269 	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
270 		return -EINVAL;
271 
272 	/* ICV error or CCMP/BIP/WPI MIC error */
273 	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
274 		status->flag |= RX_FLAG_ONLY_MONITOR;
275 
276 	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
277 	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
278 	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
279 	status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
280 
281 	if (status->wcid) {
282 		msta = container_of(status->wcid, struct mt792x_sta, wcid);
283 		spin_lock_bh(&dev->mt76.sta_poll_lock);
284 		if (list_empty(&msta->wcid.poll_list))
285 			list_add_tail(&msta->wcid.poll_list,
286 				      &dev->mt76.sta_poll_list);
287 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
288 	}
289 
290 	mt7921_get_status_freq_info(dev, mphy, status, chfreq);
291 
292 	switch (status->band) {
293 	case NL80211_BAND_5GHZ:
294 		sband = &mphy->sband_5g.sband;
295 		break;
296 	case NL80211_BAND_6GHZ:
297 		sband = &mphy->sband_6g.sband;
298 		break;
299 	default:
300 		sband = &mphy->sband_2g.sband;
301 		break;
302 	}
303 
304 	if (!sband->channels)
305 		return -EINVAL;
306 
307 	if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
308 	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
309 		skb->ip_summed = CHECKSUM_UNNECESSARY;
310 
311 	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
312 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
313 
314 	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
315 		status->flag |= RX_FLAG_MMIC_ERROR;
316 
317 	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
318 	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
319 		status->flag |= RX_FLAG_DECRYPTED;
320 		status->flag |= RX_FLAG_IV_STRIPPED;
321 		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
322 	}
323 
324 	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
325 
326 	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
327 		return -EINVAL;
328 
329 	rxd += 6;
330 	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
331 		u32 v0 = le32_to_cpu(rxd[0]);
332 		u32 v2 = le32_to_cpu(rxd[2]);
333 
334 		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
335 		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
336 		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
337 
338 		rxd += 4;
339 		if ((u8 *)rxd - skb->data >= skb->len)
340 			return -EINVAL;
341 	}
342 
343 	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
344 		u8 *data = (u8 *)rxd;
345 
346 		if (status->flag & RX_FLAG_DECRYPTED) {
347 			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
348 			case MT_CIPHER_AES_CCMP:
349 			case MT_CIPHER_CCMP_CCX:
350 			case MT_CIPHER_CCMP_256:
351 				insert_ccmp_hdr =
352 					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
353 				fallthrough;
354 			case MT_CIPHER_TKIP:
355 			case MT_CIPHER_TKIP_NO_MIC:
356 			case MT_CIPHER_GCMP:
357 			case MT_CIPHER_GCMP_256:
358 				status->iv[0] = data[5];
359 				status->iv[1] = data[4];
360 				status->iv[2] = data[3];
361 				status->iv[3] = data[2];
362 				status->iv[4] = data[1];
363 				status->iv[5] = data[0];
364 				break;
365 			default:
366 				break;
367 			}
368 		}
369 		rxd += 4;
370 		if ((u8 *)rxd - skb->data >= skb->len)
371 			return -EINVAL;
372 	}
373 
374 	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
375 		status->timestamp = le32_to_cpu(rxd[0]);
376 		status->flag |= RX_FLAG_MACTIME_START;
377 
378 		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
379 			status->flag |= RX_FLAG_AMPDU_DETAILS;
380 
381 			/* all subframes of an A-MPDU have the same timestamp */
382 			if (phy->rx_ampdu_ts != status->timestamp) {
383 				if (!++phy->ampdu_ref)
384 					phy->ampdu_ref++;
385 			}
386 			phy->rx_ampdu_ts = status->timestamp;
387 
388 			status->ampdu_ref = phy->ampdu_ref;
389 		}
390 
391 		rxd += 2;
392 		if ((u8 *)rxd - skb->data >= skb->len)
393 			return -EINVAL;
394 	}
395 
396 	/* RXD Group 3 - P-RXV */
397 	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
398 		u32 v0, v1;
399 		int ret;
400 
401 		rxv = rxd;
402 		rxd += 2;
403 		if ((u8 *)rxd - skb->data >= skb->len)
404 			return -EINVAL;
405 
406 		v0 = le32_to_cpu(rxv[0]);
407 		v1 = le32_to_cpu(rxv[1]);
408 
409 		if (v0 & MT_PRXV_HT_AD_CODE)
410 			status->enc_flags |= RX_ENC_FLAG_LDPC;
411 
412 		ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
413 						    rxv, &mode);
414 		if (ret < 0)
415 			return ret;
416 
417 		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
418 			rxd += 6;
419 			if ((u8 *)rxd - skb->data >= skb->len)
420 				return -EINVAL;
421 
422 			rxv = rxd;
423 			/* In monitor mode, the RCPI reported in GROUP 5 is
424 			 * used instead.
425 			 */
426 			v1 = le32_to_cpu(rxv[0]);
427 
428 			rxd += 12;
429 			if ((u8 *)rxd - skb->data >= skb->len)
430 				return -EINVAL;
431 		}
432 
433 		status->chains = mphy->antenna_mask;
434 		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
435 		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
436 		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
437 		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
438 		status->signal = -128;
439 		for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
440 			if (!(status->chains & BIT(i)) ||
441 			    status->chain_signal[i] >= 0)
442 				continue;
443 
444 			status->signal = max(status->signal,
445 					     status->chain_signal[i]);
446 		}
447 	}
448 
449 	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
450 	status->amsdu = !!amsdu_info;
451 	if (status->amsdu) {
452 		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
453 		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
454 	}
455 
456 	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
457 	if (hdr_trans && ieee80211_has_morefrags(fc)) {
458 		struct ieee80211_vif *vif;
459 		int err;
460 
461 		if (!msta || !msta->vif)
462 			return -EINVAL;
463 
464 		vif = container_of((void *)msta->vif, struct ieee80211_vif,
465 				   drv_priv);
466 		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
467 		if (err)
468 			return err;
469 
470 		hdr_trans = false;
471 	} else {
472 		skb_pull(skb, hdr_gap);
473 		if (!hdr_trans && status->amsdu) {
474 			memmove(skb->data + 2, skb->data,
475 				ieee80211_get_hdrlen_from_skb(skb));
476 			skb_pull(skb, 2);
477 		}
478 	}
479 
480 	if (!hdr_trans) {
481 		struct ieee80211_hdr *hdr;
482 
483 		if (insert_ccmp_hdr) {
484 			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
485 
486 			mt76_insert_ccmp_hdr(skb, key_id);
487 		}
488 
489 		hdr = mt76_skb_get_hdr(skb);
490 		fc = hdr->frame_control;
491 		if (ieee80211_is_data_qos(fc)) {
492 			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
493 			qos_ctl = *ieee80211_get_qos_ctl(hdr);
494 		}
495 	} else {
496 		status->flag |= RX_FLAG_8023;
497 	}
498 
499 	mt7921_mac_assoc_rssi(dev, skb);
500 
501 	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
502 		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
503 
504 	if (!status->wcid || !ieee80211_is_data_qos(fc))
505 		return 0;
506 
507 	status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
508 	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
509 	status->qos_ctl = qos_ctl;
510 
511 	return 0;
512 }
513 
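/* Handle a TX status (TXS) record: report the tx status to the matching
 * wcid and queue the station for airtime polling.
 */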
514 void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
515 {
516 	struct mt792x_sta *msta = NULL;
517 	struct mt76_wcid *wcid;
518 	__le32 *txs_data = data;
519 	u16 wcidx;
520 	u8 pid;
521 
522 	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
523 		return;
524 
525 	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
526 	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
527 
528 	if (pid < MT_PACKET_ID_FIRST)
529 		return;
530 
531 	if (wcidx >= MT7921_WTBL_SIZE)
532 		return;
533 
534 	rcu_read_lock();
535 
536 	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
537 	if (!wcid)
538 		goto out;
539 
540 	msta = container_of(wcid, struct mt792x_sta, wcid);
541 
542 	mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
543 	if (!wcid->sta)
544 		goto out;
545 
546 	spin_lock_bh(&dev->mt76.sta_poll_lock);
547 	if (list_empty(&msta->wcid.poll_list))
548 		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
549 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
550 
551 out:
552 	rcu_read_unlock();
553 }
554 
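/* Handle a TXRX_NOTIFY (tx free done) event: release the tx tokens and
 * skbs listed in the event and update per-wcid tx statistics.
 */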
555 static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
556 {
557 	struct mt76_connac_tx_free *free = data;
558 	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
559 	struct mt76_dev *mdev = &dev->mt76;
560 	struct mt76_txwi_cache *txwi;
561 	struct ieee80211_sta *sta = NULL;
562 	struct mt76_wcid *wcid = NULL;
563 	struct sk_buff *skb, *tmp;
564 	void *end = data + len;
565 	LIST_HEAD(free_list);
566 	bool wake = false;
567 	u8 i, count;
568 
569 	/* clean DMA queues and unmap buffers first */
570 	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
571 	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
572 
573 	count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
574 	if (WARN_ON_ONCE((void *)&tx_info[count] > end))
575 		return;
576 
577 	for (i = 0; i < count; i++) {
578 		u32 msdu, info = le32_to_cpu(tx_info[i]);
579 		u8 stat;
580 
581 		/* 1'b1: new wcid pair.
582 		 * 1'b0: msdu_id with the same 'wcid pair' as above.
583 		 */
584 		if (info & MT_TX_FREE_PAIR) {
585 			struct mt792x_sta *msta;
586 			u16 idx;
587 
588 			count++;
589 			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
590 			wcid = rcu_dereference(dev->mt76.wcid[idx]);
591 			sta = wcid_to_sta(wcid);
592 			if (!sta)
593 				continue;
594 
595 			msta = container_of(wcid, struct mt792x_sta, wcid);
596 			spin_lock_bh(&mdev->sta_poll_lock);
597 			if (list_empty(&msta->wcid.poll_list))
598 				list_add_tail(&msta->wcid.poll_list,
599 					      &mdev->sta_poll_list);
600 			spin_unlock_bh(&mdev->sta_poll_lock);
601 			continue;
602 		}
603 
604 		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
605 		stat = FIELD_GET(MT_TX_FREE_STATUS, info);
606 
607 		if (wcid) {
608 			wcid->stats.tx_retries +=
609 				FIELD_GET(MT_TX_FREE_COUNT, info) - 1;
610 			wcid->stats.tx_failed += !!stat;
611 		}
612 
613 		txwi = mt76_token_release(mdev, msdu, &wake);
614 		if (!txwi)
615 			continue;
616 
617 		mt76_connac2_txwi_free(mdev, txwi, sta, &free_list);
618 	}
619 
620 	if (wake)
621 		mt76_set_tx_blocked(&dev->mt76, false);
622 
623 	list_for_each_entry_safe(skb, tmp, &free_list, list) {
624 		skb_list_del_init(skb);
625 		napi_consume_skb(skb, 1);
626 	}
627 
628 	rcu_read_lock();
629 	mt7921_mac_sta_poll(dev);
630 	rcu_read_unlock();
631 
632 	mt76_worker_schedule(&dev->mt76.tx_worker);
633 }
634 
635 bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
636 {
637 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
638 	__le32 *rxd = (__le32 *)data;
639 	__le32 *end = (__le32 *)&rxd[len / 4];
640 	enum rx_pkt_type type;
641 
642 	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
643 
644 	switch (type) {
645 	case PKT_TYPE_TXRX_NOTIFY:
646 		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
647 		mt7921_mac_tx_free(dev, data, len); /* mmio */
648 		return false;
649 	case PKT_TYPE_TXS:
650 		for (rxd += 2; rxd + 8 <= end; rxd += 8)
651 			mt7921_mac_add_txs(dev, rxd);
652 		return false;
653 	default:
654 		return true;
655 	}
656 }
657 EXPORT_SYMBOL_GPL(mt7921_rx_check);
658 
659 void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
660 			 struct sk_buff *skb, u32 *info)
661 {
662 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
663 	__le32 *rxd = (__le32 *)skb->data;
664 	__le32 *end = (__le32 *)&skb->data[skb->len];
665 	enum rx_pkt_type type;
666 	u16 flag;
667 
668 	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
669 	flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
670 
671 	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
672 		type = PKT_TYPE_NORMAL_MCU;
673 
674 	switch (type) {
675 	case PKT_TYPE_TXRX_NOTIFY:
676 		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
677 		mt7921_mac_tx_free(dev, skb->data, skb->len);
678 		napi_consume_skb(skb, 1);
679 		break;
680 	case PKT_TYPE_RX_EVENT:
681 		mt7921_mcu_rx_event(dev, skb);
682 		break;
683 	case PKT_TYPE_TXS:
684 		for (rxd += 2; rxd + 8 <= end; rxd += 8)
685 			mt7921_mac_add_txs(dev, rxd);
686 		dev_kfree_skb(skb);
687 		break;
688 	case PKT_TYPE_NORMAL_MCU:
689 	case PKT_TYPE_NORMAL:
690 		if (!mt7921_mac_fill_rx(dev, skb)) {
691 			mt76_rx(&dev->mt76, q, skb);
692 			return;
693 		}
694 		fallthrough;
695 	default:
696 		dev_kfree_skb(skb);
697 		break;
698 	}
699 }
700 EXPORT_SYMBOL_GPL(mt7921_queue_rx_skb);
701 
702 void mt7921_mac_reset_counters(struct mt792x_phy *phy)
703 {
704 	struct mt7921_dev *dev = phy->dev;
705 	int i;
706 
707 	for (i = 0; i < 4; i++) {
708 		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
709 		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
710 	}
711 
712 	dev->mt76.phy.survey_time = ktime_get_boottime();
713 	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
714 
715 	/* reset airtime counters */
716 	mt76_rr(dev, MT_MIB_SDR9(0));
717 	mt76_rr(dev, MT_MIB_SDR36(0));
718 	mt76_rr(dev, MT_MIB_SDR37(0));
719 
720 	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
721 	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
722 }
723 
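/* Program CCA/SIFS/slot timing for the current band, extended by the
 * configured coverage class.
 */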
724 void mt7921_mac_set_timing(struct mt792x_phy *phy)
725 {
726 	s16 coverage_class = phy->coverage_class;
727 	struct mt7921_dev *dev = phy->dev;
728 	u32 val, reg_offset;
729 	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
730 		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
731 	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
732 		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
733 	bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ;
734 	int sifs = is_2ghz ? 10 : 16, offset;
735 
736 	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
737 		return;
738 
739 	mt76_set(dev, MT_ARB_SCR(0),
740 		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
741 	udelay(1);
742 
743 	offset = 3 * coverage_class;
744 	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
745 		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
746 
747 	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
748 	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
749 	mt76_wr(dev, MT_TMAC_ICR0(0),
750 		FIELD_PREP(MT_IFS_EIFS, 360) |
751 		FIELD_PREP(MT_IFS_RIFS, 2) |
752 		FIELD_PREP(MT_IFS_SIFS, sifs) |
753 		FIELD_PREP(MT_IFS_SLOT, phy->slottime));
754 
755 	if (phy->slottime < 20 || !is_2ghz)
756 		val = MT7921_CFEND_RATE_DEFAULT;
757 	else
758 		val = MT7921_CFEND_RATE_11B;
759 
760 	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
761 	mt76_clear(dev, MT_ARB_SCR(0),
762 		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
763 }
764 
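/* Noise floor readout is not implemented for mt7921; always report 0. */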
765 static u8
766 mt7921_phy_get_nf(struct mt792x_phy *phy, int idx)
767 {
768 	return 0;
769 }
770 
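/* Accumulate channel busy/tx/rx airtime from the MIB counters into the
 * mac80211 channel state and update the noise estimate.
 */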
771 static void
772 mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
773 {
774 	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
775 	struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv;
776 	struct mt76_channel_state *state;
777 	u64 busy_time, tx_time, rx_time, obss_time;
778 	int nf;
779 
780 	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
781 				   MT_MIB_SDR9_BUSY_MASK);
782 	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
783 				 MT_MIB_SDR36_TXTIME_MASK);
784 	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
785 				 MT_MIB_SDR37_RXTIME_MASK);
786 	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
787 				   MT_MIB_OBSSTIME_MASK);
788 
789 	nf = mt7921_phy_get_nf(phy, idx);
790 	if (!phy->noise)
791 		phy->noise = nf << 4;
792 	else if (nf)
793 		phy->noise += nf - (phy->noise >> 4);
794 
795 	state = mphy->chan_state;
796 	state->cc_busy += busy_time;
797 	state->cc_tx += tx_time;
798 	state->cc_rx += rx_time + obss_time;
799 	state->cc_bss_rx += rx_time;
800 	state->noise = -(phy->noise >> 4);
801 }
802 
803 void mt7921_update_channel(struct mt76_phy *mphy)
804 {
805 	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
806 
807 	if (mt76_connac_pm_wake(mphy, &dev->pm))
808 		return;
809 
810 	mt7921_phy_update_channel(mphy, 0);
811 	/* reset obss airtime */
812 	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
813 
814 	mt76_connac_power_save_sched(mphy, &dev->pm);
815 }
816 EXPORT_SYMBOL_GPL(mt7921_update_channel);
817 
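/* Re-register an interface with the firmware after a full chip reset;
 * station interfaces are additionally forced to disconnect and reassociate.
 */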
818 static void
819 mt7921_vif_connect_iter(void *priv, u8 *mac,
820 			struct ieee80211_vif *vif)
821 {
822 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
823 	struct mt7921_dev *dev = mvif->phy->dev;
824 	struct ieee80211_hw *hw = mt76_hw(dev);
825 
826 	if (vif->type == NL80211_IFTYPE_STATION)
827 		ieee80211_disconnect(vif, true);
828 
829 	mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
830 	mt7921_mcu_set_tx(dev, vif);
831 
832 	if (vif->type == NL80211_IFTYPE_AP) {
833 		mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
834 					    true, NULL);
835 		mt7921_mcu_sta_update(dev, NULL, vif, true,
836 				      MT76_STA_INFO_STATE_NONE);
837 		mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
838 	}
839 }
840 
841 /* system error recovery */
842 void mt7921_mac_reset_work(struct work_struct *work)
843 {
844 	struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
845 					      reset_work);
846 	struct ieee80211_hw *hw = mt76_hw(dev);
847 	struct mt76_connac_pm *pm = &dev->pm;
848 	int i, ret;
849 
850 	dev_dbg(dev->mt76.dev, "chip reset\n");
851 	dev->hw_full_reset = true;
852 	ieee80211_stop_queues(hw);
853 
854 	cancel_delayed_work_sync(&dev->mphy.mac_work);
855 	cancel_delayed_work_sync(&pm->ps_work);
856 	cancel_work_sync(&pm->wake_work);
857 
858 	for (i = 0; i < 10; i++) {
859 		mutex_lock(&dev->mt76.mutex);
860 		ret = mt7921_dev_reset(dev);
861 		mutex_unlock(&dev->mt76.mutex);
862 
863 		if (!ret)
864 			break;
865 	}
866 
867 	if (i == 10)
868 		dev_err(dev->mt76.dev, "chip reset failed\n");
869 
870 	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
871 		struct cfg80211_scan_info info = {
872 			.aborted = true,
873 		};
874 
875 		ieee80211_scan_completed(dev->mphy.hw, &info);
876 	}
877 
878 	dev->hw_full_reset = false;
879 	pm->suspended = false;
880 	ieee80211_wake_queues(hw);
881 	ieee80211_iterate_active_interfaces(hw,
882 					    IEEE80211_IFACE_ITER_RESUME_ALL,
883 					    mt7921_vif_connect_iter, NULL);
884 	mt76_connac_power_save_sched(&dev->mt76.phy, pm);
885 }
886 
887 void mt7921_reset(struct mt76_dev *mdev)
888 {
889 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
890 	struct mt76_connac_pm *pm = &dev->pm;
891 
892 	if (!dev->hw_init_done)
893 		return;
894 
895 	if (dev->hw_full_reset)
896 		return;
897 
898 	if (pm->suspended)
899 		return;
900 
901 	queue_work(dev->mt76.wq, &dev->reset_work);
902 }
903 EXPORT_SYMBOL_GPL(mt7921_reset);
904 
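/* Accumulate the hardware MIB counters into the driver statistics and the
 * per-phy tx aggregation histogram.
 */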
905 void mt7921_mac_update_mib_stats(struct mt792x_phy *phy)
906 {
907 	struct mt76_mib_stats *mib = &phy->mib;
908 	struct mt7921_dev *dev = phy->dev;
909 	int i, aggr0 = 0, aggr1;
910 	u32 val;
911 
912 	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
913 					   MT_MIB_SDR3_FCS_ERR_MASK);
914 	mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
915 					    MT_MIB_ACK_FAIL_COUNT_MASK);
916 	mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
917 					   MT_MIB_BA_FAIL_COUNT_MASK);
918 	mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
919 				       MT_MIB_RTS_COUNT_MASK);
920 	mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
921 					       MT_MIB_RTS_FAIL_COUNT_MASK);
922 
923 	mib->tx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR12(0));
924 	mib->tx_mpdu_attempts_cnt += mt76_rr(dev, MT_MIB_SDR14(0));
925 	mib->tx_mpdu_success_cnt += mt76_rr(dev, MT_MIB_SDR15(0));
926 
927 	val = mt76_rr(dev, MT_MIB_SDR32(0));
928 	mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR9_EBF_CNT_MASK, val);
929 	mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR9_IBF_CNT_MASK, val);
930 
931 	val = mt76_rr(dev, MT_ETBF_TX_APP_CNT(0));
932 	mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, val);
933 	mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, val);
934 
935 	val = mt76_rr(dev, MT_ETBF_RX_FB_CNT(0));
936 	mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, val);
937 	mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, val);
938 	mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, val);
939 	mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, val);
940 
941 	mib->rx_mpdu_cnt += mt76_rr(dev, MT_MIB_SDR5(0));
942 	mib->rx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR22(0));
943 	mib->rx_ampdu_bytes_cnt += mt76_rr(dev, MT_MIB_SDR23(0));
944 	mib->rx_ba_cnt += mt76_rr(dev, MT_MIB_SDR31(0));
945 
946 	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
947 		val = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
948 		mib->tx_amsdu[i] += val;
949 		mib->tx_amsdu_cnt += val;
950 	}
951 
952 	for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
953 		u32 val2;
954 
955 		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
956 		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
957 
958 		phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
959 		phy->mt76->aggr_stats[aggr0++] += val >> 16;
960 		phy->mt76->aggr_stats[aggr1++] += val2 & 0xffff;
961 		phy->mt76->aggr_stats[aggr1++] += val2 >> 16;
962 	}
963 }
964 
965 void mt7921_mac_work(struct work_struct *work)
966 {
967 	struct mt792x_phy *phy;
968 	struct mt76_phy *mphy;
969 
970 	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
971 					       mac_work.work);
972 	phy = mphy->priv;
973 
974 	mt7921_mutex_acquire(phy->dev);
975 
976 	mt76_update_survey(mphy);
977 	if (++mphy->mac_work_count == 2) {
978 		mphy->mac_work_count = 0;
979 
980 		mt7921_mac_update_mib_stats(phy);
981 	}
982 
983 	mt7921_mutex_release(phy->dev);
984 
985 	mt76_tx_status_check(mphy->dev, false);
986 	ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
987 				     MT7921_WATCHDOG_TIME);
988 }
989 
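/* Runtime-PM wake worker: hand ownership back to the driver, kick rx
 * processing and flush tx frames queued while the device was asleep.
 */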
990 void mt7921_pm_wake_work(struct work_struct *work)
991 {
992 	struct mt7921_dev *dev;
993 	struct mt76_phy *mphy;
994 
995 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
996 						pm.wake_work);
997 	mphy = dev->phy.mt76;
998 
999 	if (!mt7921_mcu_drv_pmctrl(dev)) {
1000 		struct mt76_dev *mdev = &dev->mt76;
1001 		int i;
1002 
1003 		if (mt76_is_sdio(mdev)) {
1004 			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
1005 			mt76_worker_schedule(&mdev->sdio.txrx_worker);
1006 		} else {
1007 			local_bh_disable();
1008 			mt76_for_each_q_rx(mdev, i)
1009 				napi_schedule(&mdev->napi[i]);
1010 			local_bh_enable();
1011 			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
1012 			mt76_connac_tx_cleanup(mdev);
1013 		}
1014 		if (test_bit(MT76_STATE_RUNNING, &mphy->state))
1015 			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
1016 						     MT7921_WATCHDOG_TIME);
1017 	}
1018 
1019 	ieee80211_wake_queues(mphy->hw);
1020 	wake_up(&dev->pm.wait);
1021 }
1022 
1023 void mt7921_pm_power_save_work(struct work_struct *work)
1024 {
1025 	struct mt7921_dev *dev;
1026 	unsigned long delta;
1027 	struct mt76_phy *mphy;
1028 
1029 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1030 						pm.ps_work.work);
1031 	mphy = dev->phy.mt76;
1032 
1033 	delta = dev->pm.idle_timeout;
1034 	if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
1035 	    test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
1036 	    dev->fw_assert)
1037 		goto out;
1038 
1039 	if (mutex_is_locked(&dev->mt76.mutex))
1040 		/* If the mt76 mutex is held, we must not put the device
1041 		 * to sleep since we are currently accessing the device
1042 		 * register map. Wait for the next power_save trigger
1043 		 * instead.
1044 		 */
1045 		goto out;
1046 
1047 	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
1048 		delta = dev->pm.last_activity + delta - jiffies;
1049 		goto out;
1050 	}
1051 
1052 	if (!mt7921_mcu_fw_pmctrl(dev)) {
1053 		cancel_delayed_work_sync(&mphy->mac_work);
1054 		return;
1055 	}
1056 out:
1057 	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
1058 }
1059 
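/* Collect the queued firmware coredump fragments into a single buffer,
 * hand it to devcoredump and trigger a chip reset.
 */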
1060 void mt7921_coredump_work(struct work_struct *work)
1061 {
1062 	struct mt7921_dev *dev;
1063 	char *dump, *data;
1064 
1065 	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1066 						coredump.work.work);
1067 
1068 	if (time_is_after_jiffies(dev->coredump.last_activity +
1069 				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
1070 		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
1071 				   MT76_CONNAC_COREDUMP_TIMEOUT);
1072 		return;
1073 	}
1074 
1075 	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
1076 	data = dump;
1077 
1078 	while (true) {
1079 		struct sk_buff *skb;
1080 
1081 		spin_lock_bh(&dev->mt76.lock);
1082 		skb = __skb_dequeue(&dev->coredump.msg_list);
1083 		spin_unlock_bh(&dev->mt76.lock);
1084 
1085 		if (!skb)
1086 			break;
1087 
1088 		skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
1089 		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
1090 			dev_kfree_skb(skb);
1091 			continue;
1092 		}
1093 
1094 		memcpy(data, skb->data, skb->len);
1095 		data += skb->len;
1096 
1097 		dev_kfree_skb(skb);
1098 	}
1099 
1100 	if (dump)
1101 		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
1102 			      GFP_KERNEL);
1103 
1104 	mt7921_reset(&dev->mt76);
1105 }
1106 
1107 /* usb_sdio */
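/* Build the hardware tx descriptor (txwi) in the skb headroom reserved for
 * USB/SDIO frames.
 */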
1108 static void
1109 mt7921_usb_sdio_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
1110 			   enum mt76_txq_id qid, struct ieee80211_sta *sta,
1111 			   struct ieee80211_key_conf *key, int pid,
1112 			   struct sk_buff *skb)
1113 {
1114 	__le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
1115 
1116 	memset(txwi, 0, MT_SDIO_TXD_SIZE);
1117 	mt76_connac2_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
1118 	skb_push(skb, MT_SDIO_TXD_SIZE);
1119 }
1120 
1121 int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
1122 				   enum mt76_txq_id qid, struct mt76_wcid *wcid,
1123 				   struct ieee80211_sta *sta,
1124 				   struct mt76_tx_info *tx_info)
1125 {
1126 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
1127 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
1128 	struct ieee80211_key_conf *key = info->control.hw_key;
1129 	struct sk_buff *skb = tx_info->skb;
1130 	int err, pad, pktid, type;
1131 
1132 	if (unlikely(tx_info->skb->len <= ETH_HLEN))
1133 		return -EINVAL;
1134 
1135 	err = skb_cow_head(skb, MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE);
1136 	if (err)
1137 		return err;
1138 
1139 	if (!wcid)
1140 		wcid = &dev->mt76.global_wcid;
1141 
1142 	if (sta) {
1143 		struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
1144 
1145 		if (time_after(jiffies, msta->last_txs + HZ / 4)) {
1146 			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1147 			msta->last_txs = jiffies;
1148 		}
1149 	}
1150 
1151 	pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
1152 	mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
1153 
1154 	type = mt76_is_sdio(mdev) ? MT7921_SDIO_DATA : 0;
1155 	mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
1156 	pad = round_up(skb->len, 4) - skb->len;
1157 	if (mt76_is_usb(mdev))
1158 		pad += 4;
1159 
1160 	err = mt76_skb_adjust_pad(skb, pad);
1161 	if (err)
1162 		/* Release pktid in case of error. */
1163 		idr_remove(&wcid->pktid, pktid);
1164 
1165 	return err;
1166 }
1167 EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_prepare_skb);
1168 
1169 void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
1170 				     struct mt76_queue_entry *e)
1171 {
1172 	__le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
1173 	unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
1174 	struct ieee80211_sta *sta;
1175 	struct mt76_wcid *wcid;
1176 	u16 idx;
1177 
1178 	idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
1179 	wcid = rcu_dereference(mdev->wcid[idx]);
1180 	sta = wcid_to_sta(wcid);
1181 
1182 	if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
1183 		mt76_connac2_tx_check_aggr(sta, txwi);
1184 
1185 	skb_pull(e->skb, headroom);
1186 	mt76_tx_complete_skb(mdev, e->wcid, e->skb);
1187 }
1188 EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_complete_skb);
1189 
1190 bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
1191 {
1192 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
1193 
1194 	mt7921_mutex_acquire(dev);
1195 	mt7921_mac_sta_poll(dev);
1196 	mt7921_mutex_release(dev);
1197 
1198 	return false;
1199 }
1200 EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data);
1201 
1202 #if IS_ENABLED(CONFIG_IPV6)
1203 void mt7921_set_ipv6_ns_work(struct work_struct *work)
1204 {
1205 	struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
1206 						ipv6_ns_work);
1207 	struct sk_buff *skb;
1208 	int ret = 0;
1209 
1210 	do {
1211 		skb = skb_dequeue(&dev->ipv6_ns_list);
1212 
1213 		if (!skb)
1214 			break;
1215 
1216 		mt7921_mutex_acquire(dev);
1217 		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
1218 					    MCU_UNI_CMD(OFFLOAD), true);
1219 		mt7921_mutex_release(dev);
1220 
1221 	} while (!ret);
1222 
1223 	if (ret)
1224 		skb_queue_purge(&dev->ipv6_ns_list);
1225 }
1226 #endif
1227