/* xref: /freebsd/sys/contrib/dev/mediatek/mt76/mt7615/mac.c (revision e32fecd0c2c3ee37c47ee100f169e7eb0282a873) */
// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/devcoredump.h>
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../trace.h"
#include "../dma.h"
#include "mt7615_trace.h"
#include "mac.h"
#include "mcu.h"

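/* RCPI fields in the RX vector encode 2 * (RSSI + 110) in dBm, so this
 * simply inverts that: e.g. a field value of 160 maps to
 * (160 - 220) / 2 = -30 dBm.
 */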
#define to_rssi(field, rxv)		((FIELD_GET(field, rxv) - 220) / 2)

static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0, 17,  990, 5010, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0, 27,  615, 5010, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 27,  240,  445, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 42,  240,  510, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
		[10] = { 1, 1,  0,  0,  0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
		[11] = { 1, 1,  0,  0,  0, 0, 14,  823, 2510, 0, 0, 18, 32, 28 },
		[12] = { 1, 1,  0,  0,  0, 0, 14,  823, 2510, 0, 0, 27, 32, 24 },
	},
};

static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  9,  32, 28, 0, 13, 508, 3076, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 17, 140,  240, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 22, 190,  510, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 32, 190,  510, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 13, 323,  343, 1, 32 },
	},
};

static const struct mt7615_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8, 32, 28, 0, 13,  508, 3076, 1,  1 },
		[1] =  { 1, 0, 12, 32, 28, 0, 17,  140,  240, 1,  1 },
		[2] =  { 1, 0,  8, 32, 28, 0, 22,  190,  510, 1,  1 },
		[3] =  { 1, 0,  6, 32, 28, 0, 32,  190,  510, 1,  1 },
		[4] =  { 1, 0,  9, 32, 28, 0, 13,  323,  343, 1, 32 },
		[13] = { 1, 0, 8,  32, 28, 0, 14, 3836, 3856, 1,  1 },
		[14] = { 1, 0, 8,  32, 28, 0, 14, 3990, 4010, 1,  1 },
	},
};

static enum mt76_cipher_type
mt7615_mac_get_cipher(int cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return MT_CIPHER_BIP_CMAC_128;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return MT_CIPHER_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return MT_CIPHER_GCMP;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return MT_CIPHER_GCMP_256;
	case WLAN_CIPHER_SUITE_SMS4:
		return MT_CIPHER_WAPI;
	default:
		return MT_CIPHER_NONE;
	}
}

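/* Map a received frame to its mt76_wcid. Frames that are not unicast to us
 * are attributed to the owning vif's group wcid so mac80211 still gets a
 * valid station context.
 */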
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
					    u8 idx, bool unicast)
{
	struct mt7615_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= MT7615_WTBL_SIZE)
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7615_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7615_mac_reset_counters(struct mt7615_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT(1, i));
	}

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
	dev->mt76.phy.survey_time = ktime_get_boottime();
	if (mphy_ext)
		mphy_ext->survey_time = ktime_get_boottime();

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR9(1));

	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR36(1));

	mt76_rr(dev, MT_MIB_SDR37(0));
	mt76_rr(dev, MT_MIB_SDR37(1));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

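/* Program inter-frame spacing and PLCP/CCA timeouts. Each coverage-class
 * step stretches both timeouts by 3 us (e.g. coverage_class 2 adds 6 us),
 * and SIFS follows the 802.11 value for the band: 16 us on 5 GHz,
 * 10 us on 2.4 GHz.
 */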
void mt7615_mac_set_timing(struct mt7615_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	} else {
		struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
	}
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);

	mt76_wr(dev, MT_TMAC_ICR(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (phy->slottime < 20 || is_5ghz)
		val = MT7615_CFEND_RATE_DEFAULT;
	else
		val = MT7615_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	if (ext_phy)
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	else
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
}

static void
mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
			    struct mt76_rx_status *status, u8 chfreq)
{
	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
		status->freq = mphy->chandef.chan->center_freq;
		status->band = mphy->chandef.chan->band;
		return;
	}

	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}

static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
{
#ifdef CONFIG_NL80211_TESTMODE
	u32 rxv1 = le32_to_cpu(rxv[0]);
	u32 rxv3 = le32_to_cpu(rxv[2]);
	u32 rxv4 = le32_to_cpu(rxv[3]);
	u32 rxv5 = le32_to_cpu(rxv[4]);
	u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
	u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
	s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
	u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;

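	/* Scale the raw frequency-offset estimate: CCK reports a 12-bit
	 * value scaled by 1000/2048, OFDM a signed 12-bit value scaled by
	 * a bandwidth-dependent constant (foe_const above).
	 */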
	if (!mode) {
		/* CCK */
		foe &= ~BIT(11);
		foe *= 1000;
		foe >>= 11;
	} else {
		if (foe > 2048)
			foe -= 4096;

		foe = (foe * foe_const) >> 15;
	}

	phy->test.last_freq_offset = foe;
	phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
	phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
	phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
	phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
	phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
	phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
#endif
}

/* The HW does not translate the mac header to 802.3 for mesh point */
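/* Undo the hardware 802.3 conversion on the first fragment: rebuild an
 * 802.11 header from the RXD words and the ethernet header so mac80211
 * can reassemble the fragmented frame.
 */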
static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) !=
	    MT_RXD1_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[0]) & MT_RXD0_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		break;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7],
		       IEEE80211_HT_CTL_LEN);

	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	status->flag &= ~(RX_FLAG_RADIOTAP_HE | RX_FLAG_RADIOTAP_HE_MU);
	return 0;
}

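/* Parse one RX descriptor into mt76_rx_status. The RXD starts with four
 * mandatory dwords; optional groups 4/1/2/3 follow when flagged in RXD0,
 * and the parser advances rxd past each group it consumes.
 */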
static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7615_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	struct mt7615_phy *phy2;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
	u16 hdr_gap;
	int phy_idx;
	int i, idx;
	u8 chfreq, amsdu_info, qos_ctl = 0;
	u16 seq_ctrl = 0;
	__le16 fc = 0;

	memset(status, 0, sizeof(*status));

	chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);

	phy2 = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
	if (!phy2)
		phy_idx = 0;
	else if (phy2->chfreq == phy->chfreq)
		phy_idx = -1;
	else if (phy->chfreq == chfreq)
		phy_idx = 0;
	else if (phy2->chfreq == chfreq)
		phy_idx = 1;
	else
		phy_idx = -1;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd2 & MT_RXD2_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7615_sta *msta;

		msta = container_of(status->wcid, struct mt7615_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
			      MT_RXD2_NORMAL_NON_AMPDU))) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg5 = le32_to_cpu(rxd[5]);

		/*
		 * If both PHYs are on the same channel and we don't have a WCID,
		 * we need to figure out which PHY this packet was received on.
		 * On the primary PHY, the noise value for the chains belonging to the
		 * second PHY will be set to the noise value of the last packet from
		 * that PHY.
		 */
		if (phy_idx < 0) {
			int first_chain = ffs(phy2->mt76->chainmask) - 1;

			phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
		}
	}

	if (phy_idx == 1 && phy2) {
		mphy = dev->mt76.phys[MT_BAND1];
		phy = phy2;
		status->phy_idx = phy_idx;
	}

	if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
		return -EINVAL;

	mt7615_get_status_freq_info(dev, mphy, status, chfreq);
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			fallthrough;
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);

		mt7615_mac_fill_tm_rx(mphy->priv, rxd);

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7615_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/*
			 * When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (insert_ccmp_hdr && !hdr_trans) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	if (!hdr_trans) {
		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
EXPORT_SYMBOL_GPL(mt7615_sta_ps);

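/* Pack an ieee80211_tx_rate into the hardware rate field: rate index,
 * PHY mode and NSS, with the STBC flag doubling the stream count for
 * single-stream rates.
 */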
static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
		       struct mt76_phy *mphy,
		       const struct ieee80211_tx_rate *rate,
		       bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval = 0;

	*bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = ieee80211_rate_get_vht_mcs(rate);
		nss = ieee80211_rate_get_vht_nss(rate);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*bw = 3;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = mphy->chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	if (stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		    FIELD_PREP(MT_TX_RATE_MODE, phy) |
		    FIELD_PREP(MT_TX_RATE_NSS, nss - 1));

	return rateval;
}

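/* Fill the long-format TX descriptor (TXWI): TXD0 carries length and queue,
 * TXD1 the wcid and header info, TXD2 the frame type, TXD3 protection and
 * sequence control, TXD5 status-report settings and TXD6 an optional fixed
 * rate; non-MMIO (USB/SDIO) targets additionally fill TXD8.
 */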
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta, int pid,
			  struct ieee80211_key_conf *key,
			  enum mt76_txq_id qid, bool beacon)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	struct ieee80211_vif *vif = info->control.vif;
	bool is_mmio = mt76_is_mmio(&dev->mt76);
	u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
	struct mt76_phy *mphy = &dev->mphy;
	__le16 fc = hdr->frame_control;
	int tx_count = 8;
	u16 seqno = 0;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (sta) {
		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (phy_idx && dev->mt76.phys[MT_BAND1])
		mphy = dev->mt76.phys[MT_BAND1];

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = phy_idx ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = phy_idx ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
	} else {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
			mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
	if (key) {
		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
			val |= MT_TXD2_BIP;
			txwi[3] = 0;
		} else {
			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
		}
	} else {
		txwi[3] = 0;
	}
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;
	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u8 bw;
		u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
						     &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (info->flags & IEEE80211_TX_CTL_LDPC)
			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);

		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
				     IEEE80211_TX_RC_VHT_MCS)))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	if (!ieee80211_is_beacon(fc)) {
		struct ieee80211_hw *hw = mt76_hw(dev);

		val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid);
		if (!ieee80211_hw_check(hw, SUPPORTS_PS))
			val |= MT_TXD5_SW_POWER_MGMT;
		txwi[5] = cpu_to_le32(val);
	} else {
		txwi[5] = 0;
		/* use maximum tx count for beacons */
		tx_count = 0x1f;
	}

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val |= MT_TXD3_SN_VALID |
		       FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
	}

	txwi[3] |= cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
	txwi[7] = cpu_to_le32(val);
	if (!is_mmio) {
		val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
		      FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
		txwi[8] = cpu_to_le32(val);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);

bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

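/* Harvest per-AC airtime from each station's WTBL entry: four TX/RX
 * counter pairs starting at word 19. The counters are cleared once bit 30
 * signals imminent overflow, and the deltas are reported to mac80211's
 * airtime accounting.
 */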
void mt7615_mac_sta_poll(struct mt7615_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	static const u8 hw_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	struct ieee80211_sta *sta;
	struct mt7615_sta *msta;
	u32 addr, tx_time[4], rx_time[4];
	struct list_head sta_poll_list;
	int i;

	INIT_LIST_HEAD(&sta_poll_list);
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	while (!list_empty(&sta_poll_list)) {
		bool clear = false;

		msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
					poll_list);
		list_del_init(&msta->poll_list);

		addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;

		for (i = 0; i < 4; i++, addr += 8) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;
		}

		if (clear) {
			mt7615_mac_wtbl_update(dev, msta->wcid.idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < 4; i++) {
			u32 tx_cur = tx_time[i];
			u32 rx_cur = rx_time[hw_queue_map[i]];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll);

static void
mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
			    struct ieee80211_tx_rate *probe_rate,
			    struct ieee80211_tx_rate *rates,
			    struct mt7615_rate_desc *rd)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_tx_rate *ref;
	bool rateset, stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev;
	int i, j;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (j = 0; j < i; j++) {
			if (rates[i].idx != rates[j].idx)
				continue;
			if ((rates[i].flags ^ rates[j].flags) &
			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
			     IEEE80211_TX_RC_80_MHZ_WIDTH |
			     IEEE80211_TX_RC_160_MHZ_WIDTH))
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
						       stbc, &bw);
		if (bw)
			rd->bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		rd->probe_val = rd->val[0];
	}

	rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 3;
		bw_prev = bw;
	}

	rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 5;
		bw_prev = bw;
	}

	rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
	if (bw_prev)
		rd->bw_idx = 7;

	rd->rateset = rateset;
	rd->bw = bw;
}

static int
mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
			     struct ieee80211_tx_rate *probe_rate,
			     struct ieee80211_tx_rate *rates)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_wtbl_rate_desc *wrd;

	if (work_pending(&dev->rate_work))
		return -EBUSY;

	wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC);
	if (!wrd)
		return -ENOMEM;

	wrd->sta = sta;
	mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
				    &wrd->rate);
	list_add_tail(&wrd->node, &dev->wrd_head);
	queue_work(dev->mt76.wq, &dev->rate_work);

	return 0;
}

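/* Per-TID sequence numbers are packed as 12-bit fields starting at WTBL
 * word 11, so an entry may straddle a dword boundary (e.g. tid 2 starts
 * at word-11 bit 24 and spills into word 12), hence the second read.
 */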
u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
{
	u32 addr, val, val2;
	u8 offset;

	addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4;

	offset = tid * 12;
	addr += 4 * (offset / 32);
	offset %= 32;

	val = mt76_rr(dev, addr);
	val >>= offset;

	if (offset > 20) {
		addr += 4;
		val2 = mt76_rr(dev, addr);
		val |= val2 << (32 - offset);
	}

	return val & GENMASK(11, 0);
}

void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
			  struct ieee80211_tx_rate *probe_rate,
			  struct ieee80211_tx_rate *rates)
{
	int wcid = sta->wcid.idx, n_rates = sta->n_rates;
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_rate_desc rd;
	u32 w5, w27, addr;
	u16 idx = sta->vif->mt76.omac_idx;

	if (!mt76_is_mmio(&dev->mt76)) {
		mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
		return;
	}

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	memset(&rd, 0, sizeof(struct mt7615_rate_desc));
	mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd);

	addr = mt7615_mac_wtbl_addr(dev, wcid);
	w27 = mt76_rr(dev, addr + 27 * 4);
	w27 &= ~MT_WTBL_W27_CC_BW_SEL;
	w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw);

	w5 = mt76_rr(dev, addr + 5 * 4);
	w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
		MT_WTBL_W5_MPDU_OK_COUNT |
		MT_WTBL_W5_MPDU_FAIL_COUNT |
		MT_WTBL_W5_RATE_IDX);
	w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) |
	      FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
			 rd.bw_idx ? rd.bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w5);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3]));

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	mt76_wr(dev, addr + 27 * 4, w27);

	idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
	addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);

	mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
	sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
	sta->rate_set_tsf |= rd.rateset;

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
	sta->rate_probe = !!probe_rate;
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);

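/* Key material lives at WTBL word 30. TKIP stores the TX/RX MIC halves
 * swapped relative to mac80211's layout, and a BIP(CMAC) key occupies the
 * upper 16 bytes so it can coexist with a pairwise key.
 */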
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key,
			   enum mt76_cipher_type cipher, u16 cipher_mask,
			   enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
	u8 data[32] = {};

	if (key->keylen > sizeof(data))
		return -EINVAL;

	mt76_rr_copy(dev, addr, data, sizeof(data));
	if (cmd == SET_KEY) {
		if (cipher == MT_CIPHER_TKIP) {
			/* Rx/Tx MIC keys are swapped */
			memcpy(data, key->key, 16);
			memcpy(data + 16, key->key + 24, 8);
			memcpy(data + 24, key->key + 16, 8);
		} else {
			if (cipher_mask == BIT(cipher))
				memcpy(data, key->key, key->keylen);
			else if (cipher != MT_CIPHER_BIP_CMAC_128)
				memcpy(data, key->key, 16);
			if (cipher == MT_CIPHER_BIP_CMAC_128)
				memcpy(data + 16, key->key, 16);
		}
	} else {
		if (cipher == MT_CIPHER_BIP_CMAC_128)
			memset(data + 16, 0, 16);
		else if (cipher_mask)
			memset(data, 0, 16);
		if (!cipher_mask)
			memset(data, 0, sizeof(data));
	}

	mt76_wr_copy(dev, addr, data, sizeof(data));

	return 0;
}

static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			  enum mt76_cipher_type cipher, u16 cipher_mask,
			  int keyidx, enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return -ETIMEDOUT;

	w0 = mt76_rr(dev, addr);
	w1 = mt76_rr(dev, addr + 4);

	if (cipher_mask)
		w0 |= MT_WTBL_W0_RX_KEY_VALID;
	else
		w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
	if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
		w0 |= MT_WTBL_W0_RX_IK_VALID;
	else
		w0 &= ~MT_WTBL_W0_RX_IK_VALID;

	if (cmd == SET_KEY &&
	    (cipher != MT_CIPHER_BIP_CMAC_128 ||
	     cipher_mask == BIT(cipher))) {
		w0 &= ~MT_WTBL_W0_KEY_IDX;
		w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
	}

	mt76_wr(dev, MT_WTBL_RICR0, w0);
	mt76_wr(dev, MT_WTBL_RICR1, w1);

	if (!mt7615_mac_wtbl_update(dev, wcid->idx,
				    MT_WTBL_UPDATE_RXINFO_UPDATE))
		return -ETIMEDOUT;

	return 0;
}

static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			      enum mt76_cipher_type cipher, u16 cipher_mask,
			      enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);

	if (!cipher_mask) {
		mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
		return;
	}

	if (cmd != SET_KEY)
		return;

	if (cipher == MT_CIPHER_BIP_CMAC_128 &&
	    cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
		return;

	mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
		 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
}

int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
			      struct mt76_wcid *wcid,
			      struct ieee80211_key_conf *key,
			      enum set_key_cmd cmd)
{
	enum mt76_cipher_type cipher;
	u16 cipher_mask = wcid->cipher;
	int err;

	cipher = mt7615_mac_get_cipher(key->cipher);
	if (cipher == MT_CIPHER_NONE)
		return -EOPNOTSUPP;

	if (cmd == SET_KEY)
		cipher_mask |= BIT(cipher);
	else
		cipher_mask &= ~BIT(cipher);

	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
					 cmd);
	if (err < 0)
		return err;

	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
					key->keyidx, cmd);
	if (err < 0)
		return err;

	wcid->cipher = cipher_mask;

	return 0;
}

int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
			    struct mt76_wcid *wcid,
			    struct ieee80211_key_conf *key,
			    enum set_key_cmd cmd)
{
	int err;

	spin_lock_bh(&dev->mt76.lock);
	err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
	spin_unlock_bh(&dev->mt76.lock);

	return err;
}

static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
			    struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7615_rate_set *rs;
	struct mt76_phy *mphy;
	int first_idx = 0, last_idx;
	int i, idx, count;
	bool fixed_rate, ack_timeout;
	bool ampdu, cck = false;
	bool rs_idx;
	u32 rate_set_tsf;
	u32 final_rate, final_rate_flags, final_nss, txs;

	txs = le32_to_cpu(txs_data[1]);
	ampdu = txs & MT_TXS1_AMPDU;

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	fixed_rate = txs & MT_TXS0_FIXED_RATE;
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);

	if (fixed_rate) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

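	/*
	 * Select which of the two cached rate sets was in effect when this
	 * frame went out: a TXS timestamp within 1000000 TSF units (~1 s)
	 * after the last rate-set switch means the newly written set
	 * applies, otherwise the frame predates the switch and the previous
	 * set is used.
	 */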
	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			struct mt7615_phy *phy = &dev->phy;

			if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
				phy = dev->mt76.phys[MT_BAND1]->priv;

			mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7615_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = &dev->mphy;
		if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1])
			mphy = dev->mt76.phys[MT_BAND1];

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else
			sband = &mphy->sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);

		if ((final_rate & MT_TX_RATE_STBC) && final_nss)
			final_nss--;

		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			info->status.rates[0].count = 0;
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	struct mt76_phy *mphy = &dev->mt76.phy;
	__le32 *txs_data = data;
	u8 wcidx;
	u8 pid;

	pid = le32_get_bits(txs_data[0], MT_TXS0_PID);
	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= MT7615_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (wcid->phy_idx && dev->mt76.phys[MT_BAND1])
		mphy = dev->mt76.phys[MT_BAND1];

	if (mt7615_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mphy->hw, sta, &info);

out:
	rcu_read_unlock();
}

static void
mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi_data;
	u32 val;
	u8 wcid;

	mt76_connac_txp_skb_unmap(mdev, txwi);
	if (!txwi->skb)
		goto out;

	txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi);
	val = le32_to_cpu(txwi_data[1]);
	wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val);
	mt76_tx_complete_skb(mdev, wcid, txwi->skb);

out:
	txwi->skb = NULL;
	mt76_put_txwi(mdev, txwi);
}

static void
mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;

	trace_mac_tx_free(dev, token);
	txwi = mt76_token_put(mdev, token);
	if (!txwi)
		return;

	mt7615_txwi_free(dev, txwi);
}

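/* Process a TXRX_NOTIFY event: release every token listed after the
 * header. mt7615 encodes tokens as 16-bit IDs, later chips as 32-bit.
 */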
static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	void *tx_token = data + sizeof(*free);
	void *end = data + len;
	u8 i, count;

	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	if (is_mt7615(&dev->mt76)) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	} else {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
	}

	count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT);
	if (is_mt7615(&dev->mt76)) {
		__le16 *token = tx_token;

		if (WARN_ON_ONCE((void *)&token[count] > end))
			return;

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
	} else {
		__le32 *token = tx_token;

		if (WARN_ON_ONCE((void *)&token[count] > end))
			return;

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
	}

	rcu_read_lock();
	mt7615_mac_sta_poll(dev);
	rcu_read_unlock();

	mt76_worker_schedule(&dev->mt76.tx_worker);
}

bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7615_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd++; rxd + 7 <= end; rxd += 7)
			mt7615_mac_add_txs(dev, rxd);
		return false;
	default:
		return true;
	}
}
EXPORT_SYMBOL_GPL(mt7615_rx_check);

void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXS:
		for (rxd++; rxd + 7 <= end; rxd += 7)
			mt7615_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_TXRX_NOTIFY:
		mt7615_mac_tx_free(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7615_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7615_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb);

static void
mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (is_mt7663(&dev->mt76)) {
		if (ofdm)
			mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy),
				 MT_WF_PHY_PD_OFDM_MASK(0),
				 MT_WF_PHY_PD_OFDM(0, val));
		else
			mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy),
				 MT_WF_PHY_PD_CCK_MASK(ext_phy),
				 MT_WF_PHY_PD_CCK(ext_phy, val));
		return;
	}

	if (ofdm)
		mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
			 MT_WF_PHY_PD_OFDM_MASK(ext_phy),
			 MT_WF_PHY_PD_OFDM(ext_phy, val));
	else
		mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
			 MT_WF_PHY_PD_CCK_MASK(ext_phy),
			 MT_WF_PHY_PD_CCK(ext_phy, val));
}

static void
mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
	/* ofdm */
	mt7615_mac_set_sensitivity(phy, 0x13c, true);
	/* cck */
	mt7615_mac_set_sensitivity(phy, 0x92, false);

	phy->ofdm_sensitivity = -98;
	phy->cck_sensitivity = -110;
	phy->last_cca_adj = jiffies;
}

void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg, mask;

	mt7615_mutex_acquire(dev);

	if (phy->scs_en == enable)
		goto out;

	if (is_mt7663(&dev->mt76)) {
		reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(0);
	} else {
		reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(ext_phy);
	}

	if (enable) {
		mt76_set(dev, reg, mask);
		if (is_mt7622(&dev->mt76)) {
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8);
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7);
		}
	} else {
		mt76_clear(dev, reg, mask);
	}

	mt7615_mac_set_default_sensitivity(phy);
	phy->scs_en = enable;

out:
	mt7615_mutex_release(dev);
}

void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
	u32 rxtd, reg;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_R0_PHYMUX_5;
	else
		reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	if (ext_phy)
		rxtd = MT_WF_PHY_RXTD2(10);
	else
		rxtd = MT_WF_PHY_RXTD(12);

	mt76_set(dev, rxtd, BIT(18) | BIT(29));
	mt76_set(dev, reg, 0x5 << 12);
}

void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_R0_PHYMUX_5;
	else
		reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	/* reset PD and MDRDY counters */
	mt76_clear(dev, reg, GENMASK(22, 20));
	mt76_set(dev, reg, BIT(22) | BIT(20));
}

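/* Adapt the energy-detect threshold in 2 dB steps between the per-mode
 * default (-98 dBm OFDM, -110 dBm CCK) and the weakest observed RSSI:
 * desensitize when false CCA events pile up, re-sensitize when false CCAs
 * are rare or the RTS error rate suggests lost coverage.
 */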
static void
mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
			      u32 rts_err_rate, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
	bool ext_phy = phy != &dev->phy;
	s16 def_th = ofdm ? -98 : -110;
	bool update = false;
	s8 *sensitivity;
	int signal;

	sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
	signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
	if (!signal) {
		mt7615_mac_set_default_sensitivity(phy);
		return;
	}

	signal = min(signal, -72);
	if (false_cca > 500) {
		if (rts_err_rate > MT_FRAC(40, 100))
			return;

		/* decrease coverage */
		if (*sensitivity == def_th && signal > -90) {
			*sensitivity = -90;
			update = true;
		} else if (*sensitivity + 2 < signal) {
			*sensitivity += 2;
			update = true;
		}
	} else if ((false_cca > 0 && false_cca < 50) ||
		   rts_err_rate > MT_FRAC(60, 100)) {
		/* increase coverage */
		if (*sensitivity - 2 >= def_th) {
			*sensitivity -= 2;
			update = true;
		}
	}

	if (*sensitivity > signal) {
		*sensitivity = signal;
		update = true;
	}

	if (update) {
		u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256;

		mt7615_mac_set_sensitivity(phy, val, ofdm);
		phy->last_cca_adj = jiffies;
	}
}

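/* Sample the PD/MDRDY deltas accumulated since the last check, derive
 * the per-modulation false-CCA counts and the RTS failure rate from
 * the MIB, then run one adjustment step each for CCK and OFDM.
 */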
static void
mt7615_mac_scs_check(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	u32 val, rts_err_rate = 0;
	u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
	bool ext_phy = phy != &dev->phy;

	if (!phy->scs_en)
		return;

	if (is_mt7663(&dev->mt76))
		val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
	else
		val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
	pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);

	if (is_mt7663(&dev->mt76))
		val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	else
		val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);

	phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	phy->false_cca_cck = pd_cck - mdrdy_cck;
	mt7615_mac_cca_stats_reset(phy);

	if (mib->rts_cnt + mib->rts_retries_cnt)
		rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
				       mib->rts_cnt + mib->rts_retries_cnt);

	/* cck */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
	/* ofdm */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);

	if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
		mt7615_mac_set_default_sensitivity(phy);
}

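/* The hardware reports the noise floor as a histogram of eleven power
 * buckets; return the count-weighted average as a positive dBm
 * magnitude, or 0 if no samples were collected.
 */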
static u8
mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	u32 reg, val, sum = 0, n = 0;
	int i;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_RXTD(20);
	else
		reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);

	for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
		val = mt76_rr(dev, reg);
		sum += val * nf_power[i];
		n += val;
	}

	if (!n)
		return 0;

	return sum / n;
}

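/* Channel state accumulation. The noise floor is smoothed with an
 * exponential moving average in 4-bit fixed point: phy->noise holds
 * 16 * avg, so the update below adds (nf - avg) and the reported
 * value is -(phy->noise >> 4).
 */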
static void
mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
	struct mt7615_phy *phy = mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);

	nf = mt7615_phy_get_nf(dev, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}

static void mt7615_update_survey(struct mt7615_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
	ktime_t cur_time;

	/* MT7615 can only update both phys simultaneously
	 * since some registers are shared across bands.
	 */

	mt7615_phy_update_channel(&mdev->phy, 0);
	if (mphy_ext)
		mt7615_phy_update_channel(mphy_ext, 1);

	cur_time = ktime_get_boottime();

	mt76_update_survey_active_time(&mdev->phy, cur_time);
	if (mphy_ext)
		mt76_update_survey_active_time(mphy_ext, cur_time);

	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

void mt7615_update_channel(struct mt76_phy *mphy)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);

	if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
		return;

	mt7615_update_survey(dev);
	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);

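/* aggr_per below is the A-MPDU packet error rate in per mille:
 * 1000 * (tx MPDUs - acked MPDUs) / tx MPDUs. The shared aggr_stats
 * array is split in half, with ext phy counters in the upper half.
 */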
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i, aggr;
	u32 val, val2;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					   MT_MIB_SDR3_FCS_ERR_MASK);

	val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
			     MT_MIB_AMPDU_MPDU_COUNT);
	if (val) {
		val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
				      MT_MIB_AMPDU_ACK_COUNT);
		mib->aggr_per = 1000 * (val - val2) / val;
	}

	aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	for (i = 0; i < 4; i++) {
		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
					       val);

		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
		mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
						  val);

		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
		dev->mt76.aggr_stats[aggr++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr++] += val >> 16;
	}
}

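/* Runs after a driver-ownership (wakeup) request to the MCU: drain
 * the tx skbs queued while the chip was asleep, kick rx processing
 * (SDIO txrx worker or NAPI), and restart the periodic mac work if
 * the phy is running.
 */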
void mt7615_pm_wake_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	if (!mt7615_mcu_set_drv_ctrl(dev)) {
		struct mt76_dev *mdev = &dev->mt76;
		int i;

		if (mt76_is_sdio(mdev)) {
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_worker_schedule(&mdev->sdio.txrx_worker);
		} else {
			local_bh_disable();
			mt76_for_each_q_rx(mdev, i)
				napi_schedule(&mdev->napi[i]);
			local_bh_enable();
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
					      false);
		}

		if (test_bit(MT76_STATE_RUNNING, &mphy->state)) {
			unsigned long timeout;

			timeout = mt7615_get_macwork_timeout(dev);
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     timeout);
		}
	}

	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}

void mt7615_pm_power_save_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	unsigned long delta;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.ps_work.work);

	delta = dev->pm.idle_timeout;
	if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
		goto out;

	if (mutex_is_locked(&dev->mt76.mutex))
		/* if mt76 mutex is held we should not put the device
		 * to sleep since we are currently accessing device
		 * register map. We need to wait for the next power_save
		 * trigger.
		 */
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	if (!mt7615_mcu_set_fw_ctrl(dev))
		return;
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}

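/* Periodic housekeeping, rescheduled with the interval returned by
 * mt7615_get_macwork_timeout(): survey counters and tx status every
 * run, MIB stats and the SCS check every fifth run.
 */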
void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_phy *phy;
	struct mt76_phy *mphy;
	unsigned long timeout;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mt7615_mutex_acquire(phy->dev);

	mt7615_update_survey(phy->dev);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7615_mac_update_mib_stats(phy);
		mt7615_mac_scs_check(phy);
	}

	mt7615_mutex_release(phy->dev);

	mt76_tx_status_check(mphy->dev, false);

	timeout = mt7615_get_macwork_timeout(phy->dev);
	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
}

void mt7615_tx_token_put(struct mt7615_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id)
		mt7615_txwi_free(dev, txwi);
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
EXPORT_SYMBOL_GPL(mt7615_tx_token_put);

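/* phy->rdd_state tracks which RDD chains are active: bit 0 for the
 * main chain, bit 1 for the extension chain used on 160 and 80+80 MHz
 * channels.
 */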
static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
					MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
					MT_RX_SEL0, 0);
}

static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
	int err;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
				       MT_RX_SEL0, 1);
}

static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7615_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7615_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static int
mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
	const struct mt7615_dfs_radar_spec *radar_specs;
	struct mt7615_dev *dev = phy->dev;
	int err, i, lpn = 500;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		lpn = 8;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	/* avoid FCC radar detection in non-FCC region */
	err = mt7615_mcu_set_fcc5_lpn(dev, lpn);
	if (err < 0)
		return err;

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7615_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

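/* DFS state machine: DISABLED -> CAC -> ACTIVE. Entering CAC loads
 * the region-specific radar patterns and starts the detector;
 * RDD_CAC_END then promotes the phy to ACTIVE. mt7663 returns early
 * and does no driver-side DFS setup.
 */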
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	if (is_mt7663(&dev->mt76))
		return 0;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);
	if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
	    dfs_state < MT_DFS_STATE_CAC)
		dfs_state = MT_DFS_STATE_ACTIVE;

	if (prev_state == dfs_state)
		return 0;

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7615_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7615_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
				      ext_phy, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7615_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

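/* Beacon filtering is a firmware-offload feature and can only be used
 * while no local interface is beaconing: n_beacon_vif counts AP, mesh
 * and IBSC interfaces, and dropping of other BSS's beacons is enabled
 * only when it reaches zero.
 */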
int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
				 struct ieee80211_vif *vif,
				 bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (!mt7615_firmware_offload(dev))
		return -EOPNOTSUPP;

	switch (vif->type) {
	case NL80211_IFTYPE_MONITOR:
		return 0;
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		if (enable)
			phy->n_beacon_vif++;
		else
			phy->n_beacon_vif--;
		fallthrough;
	default:
		break;
	}

	err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif);
	if (err)
		return err;

	if (phy->n_beacon_vif) {
		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
		mt76_clear(dev, MT_WF_RFCR(ext_phy),
			   MT_WF_RFCR_DROP_OTHER_BEACON);
	} else {
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
		mt76_set(dev, MT_WF_RFCR(ext_phy),
			 MT_WF_RFCR_DROP_OTHER_BEACON);
	}

	return 0;
}

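/* Collect the firmware coredump fragments queued by the MCU event
 * handler. The work keeps re-arming itself while messages are still
 * arriving and only emits the devcoredump once the chip has been
 * quiet for 4 * MT76_CONNAC_COREDUMP_TIMEOUT.
 */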
void mt7615_coredump_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	char *dump, *data;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						coredump.work.work);

	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	/* vzalloc() may fail; keep draining and freeing the queued
	 * fragments even then, and skip the dump emission below.
	 */
	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
		if (!dump ||
		    data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);
}