xref: /freebsd/sys/contrib/dev/mediatek/mt76/mac80211.c (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #if defined(CONFIG_OF)
7 #include <linux/of.h>
8 #endif
9 #include <net/page_pool.h>
10 #if defined(__FreeBSD__)
11 #include <linux/math64.h>
12 #include <linux/numa.h>
13 #endif
14 #include "mt76.h"
15 
16 #define CHAN2G(_idx, _freq) {			\
17 	.band = NL80211_BAND_2GHZ,		\
18 	.center_freq = (_freq),			\
19 	.hw_value = (_idx),			\
20 	.max_power = 30,			\
21 }
22 
23 #define CHAN5G(_idx, _freq) {			\
24 	.band = NL80211_BAND_5GHZ,		\
25 	.center_freq = (_freq),			\
26 	.hw_value = (_idx),			\
27 	.max_power = 30,			\
28 }
29 
30 #define CHAN6G(_idx, _freq) {			\
31 	.band = NL80211_BAND_6GHZ,		\
32 	.center_freq = (_freq),			\
33 	.hw_value = (_idx),			\
34 	.max_power = 30,			\
35 }
36 
37 static const struct ieee80211_channel mt76_channels_2ghz[] = {
38 	CHAN2G(1, 2412),
39 	CHAN2G(2, 2417),
40 	CHAN2G(3, 2422),
41 	CHAN2G(4, 2427),
42 	CHAN2G(5, 2432),
43 	CHAN2G(6, 2437),
44 	CHAN2G(7, 2442),
45 	CHAN2G(8, 2447),
46 	CHAN2G(9, 2452),
47 	CHAN2G(10, 2457),
48 	CHAN2G(11, 2462),
49 	CHAN2G(12, 2467),
50 	CHAN2G(13, 2472),
51 	CHAN2G(14, 2484),
52 };
53 
54 static const struct ieee80211_channel mt76_channels_5ghz[] = {
55 	CHAN5G(36, 5180),
56 	CHAN5G(40, 5200),
57 	CHAN5G(44, 5220),
58 	CHAN5G(48, 5240),
59 
60 	CHAN5G(52, 5260),
61 	CHAN5G(56, 5280),
62 	CHAN5G(60, 5300),
63 	CHAN5G(64, 5320),
64 
65 	CHAN5G(100, 5500),
66 	CHAN5G(104, 5520),
67 	CHAN5G(108, 5540),
68 	CHAN5G(112, 5560),
69 	CHAN5G(116, 5580),
70 	CHAN5G(120, 5600),
71 	CHAN5G(124, 5620),
72 	CHAN5G(128, 5640),
73 	CHAN5G(132, 5660),
74 	CHAN5G(136, 5680),
75 	CHAN5G(140, 5700),
76 	CHAN5G(144, 5720),
77 
78 	CHAN5G(149, 5745),
79 	CHAN5G(153, 5765),
80 	CHAN5G(157, 5785),
81 	CHAN5G(161, 5805),
82 	CHAN5G(165, 5825),
83 	CHAN5G(169, 5845),
84 	CHAN5G(173, 5865),
85 	CHAN5G(177, 5885),
86 };
87 
88 static const struct ieee80211_channel mt76_channels_6ghz[] = {
89 	/* UNII-5 */
90 	CHAN6G(1, 5955),
91 	CHAN6G(5, 5975),
92 	CHAN6G(9, 5995),
93 	CHAN6G(13, 6015),
94 	CHAN6G(17, 6035),
95 	CHAN6G(21, 6055),
96 	CHAN6G(25, 6075),
97 	CHAN6G(29, 6095),
98 	CHAN6G(33, 6115),
99 	CHAN6G(37, 6135),
100 	CHAN6G(41, 6155),
101 	CHAN6G(45, 6175),
102 	CHAN6G(49, 6195),
103 	CHAN6G(53, 6215),
104 	CHAN6G(57, 6235),
105 	CHAN6G(61, 6255),
106 	CHAN6G(65, 6275),
107 	CHAN6G(69, 6295),
108 	CHAN6G(73, 6315),
109 	CHAN6G(77, 6335),
110 	CHAN6G(81, 6355),
111 	CHAN6G(85, 6375),
112 	CHAN6G(89, 6395),
113 	CHAN6G(93, 6415),
114 	/* UNII-6 */
115 	CHAN6G(97, 6435),
116 	CHAN6G(101, 6455),
117 	CHAN6G(105, 6475),
118 	CHAN6G(109, 6495),
119 	CHAN6G(113, 6515),
120 	CHAN6G(117, 6535),
121 	/* UNII-7 */
122 	CHAN6G(121, 6555),
123 	CHAN6G(125, 6575),
124 	CHAN6G(129, 6595),
125 	CHAN6G(133, 6615),
126 	CHAN6G(137, 6635),
127 	CHAN6G(141, 6655),
128 	CHAN6G(145, 6675),
129 	CHAN6G(149, 6695),
130 	CHAN6G(153, 6715),
131 	CHAN6G(157, 6735),
132 	CHAN6G(161, 6755),
133 	CHAN6G(165, 6775),
134 	CHAN6G(169, 6795),
135 	CHAN6G(173, 6815),
136 	CHAN6G(177, 6835),
137 	CHAN6G(181, 6855),
138 	CHAN6G(185, 6875),
139 	/* UNII-8 */
140 	CHAN6G(189, 6895),
141 	CHAN6G(193, 6915),
142 	CHAN6G(197, 6935),
143 	CHAN6G(201, 6955),
144 	CHAN6G(205, 6975),
145 	CHAN6G(209, 6995),
146 	CHAN6G(213, 7015),
147 	CHAN6G(217, 7035),
148 	CHAN6G(221, 7055),
149 	CHAN6G(225, 7075),
150 	CHAN6G(229, 7095),
151 	CHAN6G(233, 7115),
152 };
153 
154 #if defined(CONFIG_MT76_LEDS)
155 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
156 	{ .throughput =   0 * 1024, .blink_time = 334 },
157 	{ .throughput =   1 * 1024, .blink_time = 260 },
158 	{ .throughput =   5 * 1024, .blink_time = 220 },
159 	{ .throughput =  10 * 1024, .blink_time = 190 },
160 	{ .throughput =  20 * 1024, .blink_time = 170 },
161 	{ .throughput =  50 * 1024, .blink_time = 150 },
162 	{ .throughput =  70 * 1024, .blink_time = 130 },
163 	{ .throughput = 100 * 1024, .blink_time = 110 },
164 	{ .throughput = 200 * 1024, .blink_time =  80 },
165 	{ .throughput = 300 * 1024, .blink_time =  50 },
166 };
167 #endif
168 
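/*
 * Legacy bitrate table shared by the mt76 drivers.  The first four
 * entries are the 2.4 GHz CCK rates (1, 2, 5.5 and 11 Mbit/s), the
 * remaining eight the 6-54 Mbit/s OFDM rates.  The 5/6 GHz sbands are
 * registered with "rates + 4, n_rates - 4" further down so that the
 * CCK entries are skipped on those bands.
 */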
169 struct ieee80211_rate mt76_rates[] = {
170 	CCK_RATE(0, 10),
171 	CCK_RATE(1, 20),
172 	CCK_RATE(2, 55),
173 	CCK_RATE(3, 110),
174 	OFDM_RATE(11, 60),
175 	OFDM_RATE(15, 90),
176 	OFDM_RATE(10, 120),
177 	OFDM_RATE(14, 180),
178 	OFDM_RATE(9,  240),
179 	OFDM_RATE(13, 360),
180 	OFDM_RATE(8,  480),
181 	OFDM_RATE(12, 540),
182 };
183 EXPORT_SYMBOL_GPL(mt76_rates);
184 
185 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
186 	{ .start_freq = 2402, .end_freq = 2494, },
187 	{ .start_freq = 5150, .end_freq = 5350, },
188 	{ .start_freq = 5350, .end_freq = 5470, },
189 	{ .start_freq = 5470, .end_freq = 5725, },
190 	{ .start_freq = 5725, .end_freq = 5950, },
191 	{ .start_freq = 5945, .end_freq = 6165, },
192 	{ .start_freq = 6165, .end_freq = 6405, },
193 	{ .start_freq = 6405, .end_freq = 6525, },
194 	{ .start_freq = 6525, .end_freq = 6705, },
195 	{ .start_freq = 6705, .end_freq = 6865, },
196 	{ .start_freq = 6865, .end_freq = 7125, },
197 };
198 
199 static const struct cfg80211_sar_capa mt76_sar_capa = {
200 	.type = NL80211_SAR_TYPE_POWER,
201 	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
202 	.freq_ranges = &mt76_sar_freq_ranges[0],
203 };
204 
205 #if defined(CONFIG_MT76_LEDS)
206 static int mt76_led_init(struct mt76_phy *phy)
207 {
208 	struct mt76_dev *dev = phy->dev;
209 	struct ieee80211_hw *hw = phy->hw;
210 
211 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
212 		return 0;
213 
214 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
215 		 wiphy_name(hw->wiphy));
216 
217 	phy->leds.cdev.name = phy->leds.name;
218 	phy->leds.cdev.default_trigger =
219 		ieee80211_create_tpt_led_trigger(hw,
220 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
221 					mt76_tpt_blink,
222 					ARRAY_SIZE(mt76_tpt_blink));
223 
224 #if defined(CONFIG_OF)
225 	if (phy == &dev->phy) {
226 		struct device_node *np = dev->dev->of_node;
227 
228 		np = of_get_child_by_name(np, "led");
229 		if (np) {
230 			int led_pin;
231 
232 			if (!of_property_read_u32(np, "led-sources", &led_pin))
233 				phy->leds.pin = led_pin;
234 			phy->leds.al = of_property_read_bool(np,
235 							     "led-active-low");
236 			of_node_put(np);
237 		}
238 	}
239 #endif
240 
241 	return led_classdev_register(dev->dev, &phy->leds.cdev);
242 }
243 
244 static void mt76_led_cleanup(struct mt76_phy *phy)
245 {
246 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
247 		return;
248 
249 	led_classdev_unregister(&phy->leds.cdev);
250 }
251 #endif
252 
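/*
 * Derive the per-band HT/VHT stream capabilities from the phy antenna
 * mask: the RX MCS maps advertise one MCS set per available chain
 * (MCS 0-9 per stream for VHT), and TX STBC is only claimed when more
 * than one chain is present.
 */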
253 static void mt76_init_stream_cap(struct mt76_phy *phy,
254 				 struct ieee80211_supported_band *sband,
255 				 bool vht)
256 {
257 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
258 	int i, nstream = hweight8(phy->antenna_mask);
259 	struct ieee80211_sta_vht_cap *vht_cap;
260 	u16 mcs_map = 0;
261 
262 	if (nstream > 1)
263 		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
264 	else
265 		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
266 
267 	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
268 		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
269 
270 	if (!vht)
271 		return;
272 
273 	vht_cap = &sband->vht_cap;
274 	if (nstream > 1)
275 		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
276 	else
277 		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
278 	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
279 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
280 
281 	for (i = 0; i < 8; i++) {
282 		if (i < nstream)
283 			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
284 		else
285 			mcs_map |=
286 				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
287 	}
288 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
289 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
290 	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
291 		vht_cap->vht_mcs.tx_highest |=
292 				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
293 }
294 
295 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
296 {
297 	if (phy->cap.has_2ghz)
298 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
299 	if (phy->cap.has_5ghz)
300 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
301 	if (phy->cap.has_6ghz)
302 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
303 }
304 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
305 
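/*
 * Common sband setup helper: copy the const channel template into
 * devm-managed memory (each wiphy needs its own writable copy of the
 * channel flags), allocate the matching per-channel state
 * (struct mt76_channel_state) and fill in the default HT/VHT
 * capability bits.
 */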
306 static int
307 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
308 		const struct ieee80211_channel *chan, int n_chan,
309 		struct ieee80211_rate *rates, int n_rates,
310 		bool ht, bool vht)
311 {
312 	struct ieee80211_supported_band *sband = &msband->sband;
313 	struct ieee80211_sta_vht_cap *vht_cap;
314 	struct ieee80211_sta_ht_cap *ht_cap;
315 	struct mt76_dev *dev = phy->dev;
316 	void *chanlist;
317 	int size;
318 
319 	size = n_chan * sizeof(*chan);
320 	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
321 	if (!chanlist)
322 		return -ENOMEM;
323 
324 	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
325 				    GFP_KERNEL);
326 	if (!msband->chan)
327 		return -ENOMEM;
328 
329 	sband->channels = chanlist;
330 	sband->n_channels = n_chan;
331 	sband->bitrates = rates;
332 	sband->n_bitrates = n_rates;
333 
334 	if (!ht)
335 		return 0;
336 
337 	ht_cap = &sband->ht_cap;
338 	ht_cap->ht_supported = true;
339 	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
340 		       IEEE80211_HT_CAP_GRN_FLD |
341 		       IEEE80211_HT_CAP_SGI_20 |
342 		       IEEE80211_HT_CAP_SGI_40 |
343 		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
344 
345 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
346 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
347 
348 	mt76_init_stream_cap(phy, sband, vht);
349 
350 	if (!vht)
351 		return 0;
352 
353 	vht_cap = &sband->vht_cap;
354 	vht_cap->vht_supported = true;
355 	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
356 			IEEE80211_VHT_CAP_RXSTBC_1 |
357 			IEEE80211_VHT_CAP_SHORT_GI_80 |
358 			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
359 
360 	return 0;
361 }
362 
363 static int
364 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
365 		   int n_rates)
366 {
367 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
368 
369 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
370 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
371 			       n_rates, true, false);
372 }
373 
374 static int
375 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
376 		   int n_rates, bool vht)
377 {
378 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
379 
380 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
381 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
382 			       n_rates, true, vht);
383 }
384 
385 static int
386 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
387 		   int n_rates)
388 {
389 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
390 
391 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
392 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
393 			       n_rates, false, false);
394 }
395 
396 static void
397 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
398 		 enum nl80211_band band)
399 {
400 	struct ieee80211_supported_band *sband = &msband->sband;
401 	bool found = false;
402 	int i;
403 
404 	if (!sband)
405 		return;
406 
407 	for (i = 0; i < sband->n_channels; i++) {
408 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
409 			continue;
410 
411 		found = true;
412 		break;
413 	}
414 
415 	if (found) {
416 		phy->chandef.chan = &sband->channels[0];
417 		phy->chan_state = &msband->chan[0];
418 		return;
419 	}
420 
421 	sband->n_channels = 0;
422 	phy->hw->wiphy->bands[band] = NULL;
423 }
424 
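/*
 * Capability setup shared by mt76_register_device() and
 * mt76_register_phy(): common wiphy/hw feature flags, antenna masks,
 * the SAR capability table and the per-range SAR power limits
 * (phy->frp).
 */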
425 static int
426 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
427 {
428 	struct mt76_dev *dev = phy->dev;
429 	struct wiphy *wiphy = hw->wiphy;
430 
431 	SET_IEEE80211_DEV(hw, dev->dev);
432 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
433 
434 	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
435 			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
436 	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
437 			WIPHY_FLAG_SUPPORTS_TDLS |
438 			WIPHY_FLAG_AP_UAPSD;
439 
440 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
441 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
442 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
443 
444 	wiphy->available_antennas_tx = phy->antenna_mask;
445 	wiphy->available_antennas_rx = phy->antenna_mask;
446 
447 	wiphy->sar_capa = &mt76_sar_capa;
448 	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
449 				sizeof(struct mt76_freq_range_power),
450 				GFP_KERNEL);
451 	if (!phy->frp)
452 		return -ENOMEM;
453 
454 	hw->txq_data_size = sizeof(struct mt76_txq);
455 	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
456 
457 	if (!hw->max_tx_fragments)
458 		hw->max_tx_fragments = 16;
459 
460 	ieee80211_hw_set(hw, SIGNAL_DBM);
461 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
462 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
463 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
464 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
465 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
466 	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
467 
468 	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
469 		ieee80211_hw_set(hw, TX_AMSDU);
470 		ieee80211_hw_set(hw, TX_FRAG_LIST);
471 	}
472 
473 	ieee80211_hw_set(hw, MFP_CAPABLE);
474 	ieee80211_hw_set(hw, AP_LINK_PS);
475 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
476 
477 	return 0;
478 }
479 
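/*
 * Allocate an extra (secondary band) phy.  The ieee80211_hw private
 * area is laid out as:
 *
 *	hw->priv: [ struct mt76_phy | pad to 8 bytes | driver data (size) ]
 *
 * phy->priv points at the driver-specific part behind the aligned
 * struct mt76_phy.
 */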
480 struct mt76_phy *
481 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
482 	       const struct ieee80211_ops *ops, u8 band_idx)
483 {
484 	struct ieee80211_hw *hw;
485 	unsigned int phy_size;
486 	struct mt76_phy *phy;
487 
488 	phy_size = ALIGN(sizeof(*phy), 8);
489 	hw = ieee80211_alloc_hw(size + phy_size, ops);
490 	if (!hw)
491 		return NULL;
492 
493 	phy = hw->priv;
494 	phy->dev = dev;
495 	phy->hw = hw;
496 #if defined(__linux__)
497 	phy->priv = hw->priv + phy_size;
498 #elif defined(__FreeBSD__)
499 	phy->priv = (u8 *)hw->priv + phy_size;
500 #endif
501 	phy->band_idx = band_idx;
502 
503 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
504 	hw->wiphy->interface_modes =
505 		BIT(NL80211_IFTYPE_STATION) |
506 		BIT(NL80211_IFTYPE_AP) |
507 #ifdef CONFIG_MAC80211_MESH
508 		BIT(NL80211_IFTYPE_MESH_POINT) |
509 #endif
510 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
511 		BIT(NL80211_IFTYPE_P2P_GO) |
512 		BIT(NL80211_IFTYPE_ADHOC);
513 
514 	return phy;
515 }
516 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
517 
518 int mt76_register_phy(struct mt76_phy *phy, bool vht,
519 		      struct ieee80211_rate *rates, int n_rates)
520 {
521 	int ret;
522 
523 	ret = mt76_phy_init(phy, phy->hw);
524 	if (ret)
525 		return ret;
526 
527 	if (phy->cap.has_2ghz) {
528 		ret = mt76_init_sband_2g(phy, rates, n_rates);
529 		if (ret)
530 			return ret;
531 	}
532 
533 	if (phy->cap.has_5ghz) {
534 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
535 		if (ret)
536 			return ret;
537 	}
538 
539 	if (phy->cap.has_6ghz) {
540 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
541 		if (ret)
542 			return ret;
543 	}
544 
545 #if defined(CONFIG_MT76_LEDS)
546 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
547 		ret = mt76_led_init(phy);
548 		if (ret)
549 			return ret;
550 	}
551 #endif
552 
553 	wiphy_read_of_freq_limits(phy->hw->wiphy);
554 	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
555 	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
556 	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
557 
558 	ret = ieee80211_register_hw(phy->hw);
559 	if (ret)
560 		return ret;
561 
562 	set_bit(MT76_STATE_REGISTERED, &phy->state);
563 	phy->dev->phys[phy->band_idx] = phy;
564 
565 	return 0;
566 }
567 EXPORT_SYMBOL_GPL(mt76_register_phy);
568 
569 void mt76_unregister_phy(struct mt76_phy *phy)
570 {
571 	struct mt76_dev *dev = phy->dev;
572 
573 	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
574 		return;
575 
576 #if defined(CONFIG_MT76_LEDS)
577 	if (IS_ENABLED(CONFIG_MT76_LEDS))
578 		mt76_led_cleanup(phy);
579 #endif
580 	mt76_tx_status_check(dev, true);
581 	ieee80211_unregister_hw(phy->hw);
582 	dev->phys[phy->band_idx] = NULL;
583 }
584 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
585 
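/*
 * Allocate the page pool backing an RX queue.  The data queues
 * (MAIN/BAND1/BAND2) get a 256-page pool, the remaining (mostly MCU
 * event) queues only 16 pages.  On MMIO devices the pool also takes
 * care of DMA mapping and syncing of the RX buffers.
 */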
586 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
587 {
588 	struct page_pool_params pp_params = {
589 		.order = 0,
590 		.flags = PP_FLAG_PAGE_FRAG,
591 		.nid = NUMA_NO_NODE,
592 		.dev = dev->dma_dev,
593 	};
594 	int idx = q - dev->q_rx;
595 
596 	switch (idx) {
597 	case MT_RXQ_MAIN:
598 	case MT_RXQ_BAND1:
599 	case MT_RXQ_BAND2:
600 		pp_params.pool_size = 256;
601 		break;
602 	default:
603 		pp_params.pool_size = 16;
604 		break;
605 	}
606 
607 	if (mt76_is_mmio(dev)) {
608 		/* rely on page_pool for DMA mapping */
609 		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
610 		pp_params.dma_dir = DMA_FROM_DEVICE;
611 		pp_params.max_len = PAGE_SIZE;
612 		pp_params.offset = 0;
613 	}
614 
615 	q->page_pool = page_pool_create(&pp_params);
616 	if (IS_ERR(q->page_pool)) {
617 		int err = PTR_ERR(q->page_pool);
618 
619 		q->page_pool = NULL;
620 		return err;
621 	}
622 
623 	return 0;
624 }
625 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
626 
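/*
 * Allocate the primary mt76_dev together with the band 0 phy.  A driver
 * probe sequence typically looks roughly like the sketch below (the
 * foo_* names are placeholders, not part of this API):
 *
 *	mdev = mt76_alloc_device(&pdev->dev, sizeof(struct foo_dev),
 *				 &foo_ieee80211_ops, &foo_mt76_drv_ops);
 *	if (!mdev)
 *		return -ENOMEM;
 *	... hardware/firmware init ...
 *	err = mt76_register_device(mdev, true, foo_rates,
 *				   ARRAY_SIZE(foo_rates));
 */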
627 struct mt76_dev *
628 mt76_alloc_device(struct device *pdev, unsigned int size,
629 		  const struct ieee80211_ops *ops,
630 		  const struct mt76_driver_ops *drv_ops)
631 {
632 	struct ieee80211_hw *hw;
633 	struct mt76_phy *phy;
634 	struct mt76_dev *dev;
635 	int i;
636 
637 	hw = ieee80211_alloc_hw(size, ops);
638 	if (!hw)
639 		return NULL;
640 
641 	dev = hw->priv;
642 	dev->hw = hw;
643 	dev->dev = pdev;
644 	dev->drv = drv_ops;
645 	dev->dma_dev = pdev;
646 
647 	phy = &dev->phy;
648 	phy->dev = dev;
649 	phy->hw = hw;
650 	phy->band_idx = MT_BAND0;
651 	dev->phys[phy->band_idx] = phy;
652 
653 	spin_lock_init(&dev->rx_lock);
654 	spin_lock_init(&dev->lock);
655 	spin_lock_init(&dev->cc_lock);
656 	spin_lock_init(&dev->status_lock);
657 	spin_lock_init(&dev->wed_lock);
658 	mutex_init(&dev->mutex);
659 	init_waitqueue_head(&dev->tx_wait);
660 
661 	skb_queue_head_init(&dev->mcu.res_q);
662 	init_waitqueue_head(&dev->mcu.wait);
663 	mutex_init(&dev->mcu.mutex);
664 	dev->tx_worker.fn = mt76_tx_worker;
665 
666 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
667 	hw->wiphy->interface_modes =
668 		BIT(NL80211_IFTYPE_STATION) |
669 		BIT(NL80211_IFTYPE_AP) |
670 #ifdef CONFIG_MAC80211_MESH
671 		BIT(NL80211_IFTYPE_MESH_POINT) |
672 #endif
673 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
674 		BIT(NL80211_IFTYPE_P2P_GO) |
675 		BIT(NL80211_IFTYPE_ADHOC);
676 
677 	spin_lock_init(&dev->token_lock);
678 	idr_init(&dev->token);
679 
680 	spin_lock_init(&dev->rx_token_lock);
681 	idr_init(&dev->rx_token);
682 
683 	INIT_LIST_HEAD(&dev->wcid_list);
684 	INIT_LIST_HEAD(&dev->sta_poll_list);
685 	spin_lock_init(&dev->sta_poll_lock);
686 
687 	INIT_LIST_HEAD(&dev->txwi_cache);
688 	INIT_LIST_HEAD(&dev->rxwi_cache);
689 	dev->token_size = dev->drv->token_size;
690 
691 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
692 		skb_queue_head_init(&dev->rx_skb[i]);
693 
694 	dev->wq = alloc_ordered_workqueue("mt76", 0);
695 	if (!dev->wq) {
696 		ieee80211_free_hw(hw);
697 		return NULL;
698 	}
699 
700 	return dev;
701 }
702 EXPORT_SYMBOL_GPL(mt76_alloc_device);
703 
704 int mt76_register_device(struct mt76_dev *dev, bool vht,
705 			 struct ieee80211_rate *rates, int n_rates)
706 {
707 	struct ieee80211_hw *hw = dev->hw;
708 	struct mt76_phy *phy = &dev->phy;
709 	int ret;
710 
711 	dev_set_drvdata(dev->dev, dev);
712 	ret = mt76_phy_init(phy, hw);
713 	if (ret)
714 		return ret;
715 
716 	if (phy->cap.has_2ghz) {
717 		ret = mt76_init_sband_2g(phy, rates, n_rates);
718 		if (ret)
719 			return ret;
720 	}
721 
722 	if (phy->cap.has_5ghz) {
723 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
724 		if (ret)
725 			return ret;
726 	}
727 
728 	if (phy->cap.has_6ghz) {
729 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
730 		if (ret)
731 			return ret;
732 	}
733 
734 	wiphy_read_of_freq_limits(hw->wiphy);
735 	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
736 	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
737 	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
738 
739 #if defined(CONFIG_MT76_LEDS)
740 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
741 		ret = mt76_led_init(phy);
742 		if (ret)
743 			return ret;
744 	}
745 #endif
746 
747 	ret = ieee80211_register_hw(hw);
748 	if (ret)
749 		return ret;
750 
751 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
752 	set_bit(MT76_STATE_REGISTERED, &phy->state);
753 	sched_set_fifo_low(dev->tx_worker.task);
754 
755 	return 0;
756 }
757 EXPORT_SYMBOL_GPL(mt76_register_device);
758 
759 void mt76_unregister_device(struct mt76_dev *dev)
760 {
761 #if defined(__linux__)
762 	struct ieee80211_hw *hw = dev->hw;
763 #endif
764 
765 	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
766 		return;
767 
768 #if defined(CONFIG_MT76_LEDS)
769 	if (IS_ENABLED(CONFIG_MT76_LEDS))
770 		mt76_led_cleanup(&dev->phy);
771 #endif
772 	mt76_tx_status_check(dev, true);
773 #if defined(__linux__)
774 	ieee80211_unregister_hw(hw);
775 #elif defined(__FreeBSD__)
776 	ieee80211_unregister_hw(dev->hw);
777 #endif
778 }
779 EXPORT_SYMBOL_GPL(mt76_unregister_device);
780 
781 void mt76_free_device(struct mt76_dev *dev)
782 {
783 	mt76_worker_teardown(&dev->tx_worker);
784 	if (dev->wq) {
785 		destroy_workqueue(dev->wq);
786 		dev->wq = NULL;
787 	}
788 	ieee80211_free_hw(dev->hw);
789 }
790 EXPORT_SYMBOL_GPL(mt76_free_device);
791 
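/*
 * A-MSDU handling on RX: subframes reported with the same sequence
 * number are chained into the frag_list of the first subframe by
 * mt76_rx_release_burst() and only released to the RX queue once the
 * burst is complete (or a frame belonging to a different A-MSDU shows
 * up).
 */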
792 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
793 {
794 	struct sk_buff *skb = phy->rx_amsdu[q].head;
795 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
796 	struct mt76_dev *dev = phy->dev;
797 
798 	phy->rx_amsdu[q].head = NULL;
799 	phy->rx_amsdu[q].tail = NULL;
800 
801 	/*
802 	 * Validate that the A-MSDU has a proper first subframe.
803 	 * A single MSDU can be parsed as an A-MSDU when the unauthenticated
804 	 * A-MSDU flag in the QoS header gets flipped. In such cases, the
805 	 * first subframe carries an LLC/SNAP header in the location of the
806 	 * destination address.
807 	 */
808 	if (skb_shinfo(skb)->frag_list) {
809 		int offset = 0;
810 
811 		if (!(status->flag & RX_FLAG_8023)) {
812 			offset = ieee80211_get_hdrlen_from_skb(skb);
813 
814 			if ((status->flag &
815 			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
816 			    RX_FLAG_DECRYPTED)
817 				offset += 8;
818 		}
819 
820 		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
821 			dev_kfree_skb(skb);
822 			return;
823 		}
824 	}
825 	__skb_queue_tail(&dev->rx_skb[q], skb);
826 }
827 
828 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
829 				  struct sk_buff *skb)
830 {
831 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
832 
833 	if (phy->rx_amsdu[q].head &&
834 	    (!status->amsdu || status->first_amsdu ||
835 	     status->seqno != phy->rx_amsdu[q].seqno))
836 		mt76_rx_release_amsdu(phy, q);
837 
838 	if (!phy->rx_amsdu[q].head) {
839 		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
840 		phy->rx_amsdu[q].seqno = status->seqno;
841 		phy->rx_amsdu[q].head = skb;
842 	} else {
843 		*phy->rx_amsdu[q].tail = skb;
844 		phy->rx_amsdu[q].tail = &skb->next;
845 	}
846 
847 	if (!status->amsdu || status->last_amsdu)
848 		mt76_rx_release_amsdu(phy, q);
849 }
850 
851 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
852 {
853 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
854 	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
855 
856 	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
857 		dev_kfree_skb(skb);
858 		return;
859 	}
860 
861 #ifdef CONFIG_NL80211_TESTMODE
862 	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
863 		phy->test.rx_stats.packets[q]++;
864 		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
865 			phy->test.rx_stats.fcs_error[q]++;
866 	}
867 #endif
868 
869 	mt76_rx_release_burst(phy, q, skb);
870 }
871 EXPORT_SYMBOL_GPL(mt76_rx);
872 
873 bool mt76_has_tx_pending(struct mt76_phy *phy)
874 {
875 	struct mt76_queue *q;
876 	int i;
877 
878 	for (i = 0; i < __MT_TXQ_MAX; i++) {
879 		q = phy->q_tx[i];
880 		if (q && q->queued)
881 			return true;
882 	}
883 
884 	return false;
885 }
886 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
887 
888 static struct mt76_channel_state *
889 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
890 {
891 	struct mt76_sband *msband;
892 	int idx;
893 
894 	if (c->band == NL80211_BAND_2GHZ)
895 		msband = &phy->sband_2g;
896 	else if (c->band == NL80211_BAND_6GHZ)
897 		msband = &phy->sband_6g;
898 	else
899 		msband = &phy->sband_5g;
900 
901 	idx = c - &msband->sband.channels[0];
902 	return &msband->chan[idx];
903 }
904 
905 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
906 {
907 	struct mt76_channel_state *state = phy->chan_state;
908 
909 	state->cc_active += ktime_to_us(ktime_sub(time,
910 						  phy->survey_time));
911 	phy->survey_time = time;
912 }
913 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
914 
915 void mt76_update_survey(struct mt76_phy *phy)
916 {
917 	struct mt76_dev *dev = phy->dev;
918 	ktime_t cur_time;
919 
920 	if (dev->drv->update_survey)
921 		dev->drv->update_survey(phy);
922 
923 	cur_time = ktime_get_boottime();
924 	mt76_update_survey_active_time(phy, cur_time);
925 
926 	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
927 		struct mt76_channel_state *state = phy->chan_state;
928 
929 		spin_lock_bh(&dev->cc_lock);
930 		state->cc_bss_rx += dev->cur_cc_bss_rx;
931 		dev->cur_cc_bss_rx = 0;
932 		spin_unlock_bh(&dev->cc_lock);
933 	}
934 }
935 EXPORT_SYMBOL_GPL(mt76_update_survey);
936 
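/*
 * Switch the phy to the channel currently configured in hw->conf.
 * Pending TX is drained first (bounded by a 200 ms timeout), the survey
 * counters are updated, and the DFS state is invalidated whenever the
 * control channel or bandwidth changes.  Off-channel (scan) visits do
 * not update phy->main_chan and their channel state is cleared.
 */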
937 void mt76_set_channel(struct mt76_phy *phy)
938 {
939 	struct mt76_dev *dev = phy->dev;
940 	struct ieee80211_hw *hw = phy->hw;
941 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
942 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
943 	int timeout = HZ / 5;
944 
945 	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
946 	mt76_update_survey(phy);
947 
948 	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
949 	    phy->chandef.width != chandef->width)
950 		phy->dfs_state = MT_DFS_STATE_UNKNOWN;
951 
952 	phy->chandef = *chandef;
953 	phy->chan_state = mt76_channel_state(phy, chandef->chan);
954 
955 	if (!offchannel)
956 		phy->main_chan = chandef->chan;
957 
958 	if (chandef->chan != phy->main_chan)
959 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
960 }
961 EXPORT_SYMBOL_GPL(mt76_set_channel);
962 
963 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
964 		    struct survey_info *survey)
965 {
966 	struct mt76_phy *phy = hw->priv;
967 	struct mt76_dev *dev = phy->dev;
968 	struct mt76_sband *sband;
969 	struct ieee80211_channel *chan;
970 	struct mt76_channel_state *state;
971 	int ret = 0;
972 
973 	mutex_lock(&dev->mutex);
974 	if (idx == 0 && dev->drv->update_survey)
975 		mt76_update_survey(phy);
976 
977 	if (idx >= phy->sband_2g.sband.n_channels +
978 		   phy->sband_5g.sband.n_channels) {
979 		idx -= (phy->sband_2g.sband.n_channels +
980 			phy->sband_5g.sband.n_channels);
981 		sband = &phy->sband_6g;
982 	} else if (idx >= phy->sband_2g.sband.n_channels) {
983 		idx -= phy->sband_2g.sband.n_channels;
984 		sband = &phy->sband_5g;
985 	} else {
986 		sband = &phy->sband_2g;
987 	}
988 
989 	if (idx >= sband->sband.n_channels) {
990 		ret = -ENOENT;
991 		goto out;
992 	}
993 
994 	chan = &sband->sband.channels[idx];
995 	state = mt76_channel_state(phy, chan);
996 
997 	memset(survey, 0, sizeof(*survey));
998 	survey->channel = chan;
999 	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
1000 	survey->filled |= dev->drv->survey_flags;
1001 	if (state->noise)
1002 		survey->filled |= SURVEY_INFO_NOISE_DBM;
1003 
1004 	if (chan == phy->main_chan) {
1005 		survey->filled |= SURVEY_INFO_IN_USE;
1006 
1007 		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
1008 			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
1009 	}
1010 
1011 	survey->time_busy = div_u64(state->cc_busy, 1000);
1012 	survey->time_rx = div_u64(state->cc_rx, 1000);
1013 	survey->time = div_u64(state->cc_active, 1000);
1014 	survey->noise = state->noise;
1015 
1016 	spin_lock_bh(&dev->cc_lock);
1017 	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
1018 	survey->time_tx = div_u64(state->cc_tx, 1000);
1019 	spin_unlock_bh(&dev->cc_lock);
1020 
1021 out:
1022 	mutex_unlock(&dev->mutex);
1023 
1024 	return ret;
1025 }
1026 EXPORT_SYMBOL_GPL(mt76_get_survey);
1027 
1028 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1029 			 struct ieee80211_key_conf *key)
1030 {
1031 	struct ieee80211_key_seq seq;
1032 	int i;
1033 
1034 	wcid->rx_check_pn = false;
1035 
1036 	if (!key)
1037 		return;
1038 
1039 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1040 		return;
1041 
1042 	wcid->rx_check_pn = true;
1043 
1044 	/* data frame */
1045 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1046 		ieee80211_get_key_rx_seq(key, i, &seq);
1047 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1048 	}
1049 
1050 	/* robust management frame */
1051 	ieee80211_get_key_rx_seq(key, -1, &seq);
1052 	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1053 
1054 }
1055 EXPORT_SYMBOL(mt76_wcid_key_setup);
1056 
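/*
 * Combine the per-chain RSSI values into a single signal estimate:
 * start from the strongest chain and add a small bonus (up to 3 dB) for
 * each additional chain, depending on how close it is to the current
 * estimate.  Example with two chains at -50 and -52 dBm: -50 dBm plus a
 * 2 dB bonus gives -48 dBm.
 */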
1057 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1058 {
1059 	int signal = -128;
1060 	u8 chains;
1061 
1062 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1063 		int cur, diff;
1064 
1065 		cur = *chain_signal;
1066 		if (!(chains & BIT(0)) ||
1067 		    cur > 0)
1068 			continue;
1069 
1070 		if (cur > signal)
1071 			swap(cur, signal);
1072 
1073 		diff = signal - cur;
1074 		if (diff == 0)
1075 			signal += 3;
1076 		else if (diff <= 2)
1077 			signal += 2;
1078 		else if (diff <= 6)
1079 			signal += 1;
1080 	}
1081 
1082 	return signal;
1083 }
1084 EXPORT_SYMBOL(mt76_rx_signal);
1085 
1086 static void
1087 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1088 		struct ieee80211_hw **hw,
1089 		struct ieee80211_sta **sta)
1090 {
1091 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1092 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1093 	struct mt76_rx_status mstat;
1094 
1095 	mstat = *((struct mt76_rx_status *)skb->cb);
1096 	memset(status, 0, sizeof(*status));
1097 
1098 	status->flag = mstat.flag;
1099 	status->freq = mstat.freq;
1100 	status->enc_flags = mstat.enc_flags;
1101 	status->encoding = mstat.encoding;
1102 	status->bw = mstat.bw;
1103 	if (status->encoding == RX_ENC_EHT) {
1104 		status->eht.ru = mstat.eht.ru;
1105 		status->eht.gi = mstat.eht.gi;
1106 	} else {
1107 		status->he_ru = mstat.he_ru;
1108 		status->he_gi = mstat.he_gi;
1109 		status->he_dcm = mstat.he_dcm;
1110 	}
1111 	status->rate_idx = mstat.rate_idx;
1112 	status->nss = mstat.nss;
1113 	status->band = mstat.band;
1114 	status->signal = mstat.signal;
1115 	status->chains = mstat.chains;
1116 	status->ampdu_reference = mstat.ampdu_ref;
1117 	status->device_timestamp = mstat.timestamp;
1118 	status->mactime = mstat.timestamp;
1119 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1120 	if (status->signal <= -128)
1121 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1122 
1123 	if (ieee80211_is_beacon(hdr->frame_control) ||
1124 	    ieee80211_is_probe_resp(hdr->frame_control))
1125 		status->boottime_ns = ktime_get_boottime_ns();
1126 
1127 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1128 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1129 		     sizeof(mstat.chain_signal));
1130 	memcpy(status->chain_signal, mstat.chain_signal,
1131 	       sizeof(mstat.chain_signal));
1132 
1133 	*sta = wcid_to_sta(mstat.wcid);
1134 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1135 }
1136 
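/*
 * Software CCMP replay check: compare the received PN against the last
 * PN accepted for the same TID (or the dedicated management frame
 * counter) and mark replayed frames as monitor-only so mac80211 keeps
 * them out of the data path.
 */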
1137 static void
1138 mt76_check_ccmp_pn(struct sk_buff *skb)
1139 {
1140 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1141 	struct mt76_wcid *wcid = status->wcid;
1142 	struct ieee80211_hdr *hdr;
1143 	int security_idx;
1144 	int ret;
1145 
1146 	if (!(status->flag & RX_FLAG_DECRYPTED))
1147 		return;
1148 
1149 	if (status->flag & RX_FLAG_ONLY_MONITOR)
1150 		return;
1151 
1152 	if (!wcid || !wcid->rx_check_pn)
1153 		return;
1154 
1155 	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1156 	if (status->flag & RX_FLAG_8023)
1157 		goto skip_hdr_check;
1158 
1159 	hdr = mt76_skb_get_hdr(skb);
1160 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1161 		/*
1162 		 * Validate the first fragment both here and in mac80211.
1163 		 * All further fragments will be validated by mac80211 only.
1164 		 */
1165 		if (ieee80211_is_frag(hdr) &&
1166 		    !ieee80211_is_first_frag(hdr->frame_control))
1167 			return;
1168 	}
1169 
1170 	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1171 	 *
1172 	 * the recipient shall maintain a single replay counter for received
1173 	 * individually addressed robust Management frames that are received
1174 	 * with the To DS subfield equal to 0, [...]
1175 	 */
1176 	if (ieee80211_is_mgmt(hdr->frame_control) &&
1177 	    !ieee80211_has_tods(hdr->frame_control))
1178 		security_idx = IEEE80211_NUM_TIDS;
1179 
1180 skip_hdr_check:
1181 	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
1182 	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1183 		     sizeof(status->iv));
1184 	if (ret <= 0) {
1185 		status->flag |= RX_FLAG_ONLY_MONITOR;
1186 		return;
1187 	}
1188 
1189 	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1190 
1191 	if (status->flag & RX_FLAG_IV_STRIPPED)
1192 		status->flag |= RX_FLAG_PN_VALIDATED;
1193 }
1194 
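/*
 * Software RX airtime accounting (MT_DRV_SW_RX_AIRTIME): estimate the
 * airtime of received frames with ieee80211_calc_rx_airtime(), add it
 * to the per-channel BSS RX counter and report it to mac80211's airtime
 * fairness accounting.  Frames belonging to one A-MPDU are accumulated
 * and reported in a single batch.
 */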
1195 static void
1196 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1197 		    int len)
1198 {
1199 	struct mt76_wcid *wcid = status->wcid;
1200 	struct ieee80211_rx_status info = {
1201 		.enc_flags = status->enc_flags,
1202 		.rate_idx = status->rate_idx,
1203 		.encoding = status->encoding,
1204 		.band = status->band,
1205 		.nss = status->nss,
1206 		.bw = status->bw,
1207 	};
1208 	struct ieee80211_sta *sta;
1209 	u32 airtime;
1210 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1211 
1212 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1213 	spin_lock(&dev->cc_lock);
1214 	dev->cur_cc_bss_rx += airtime;
1215 	spin_unlock(&dev->cc_lock);
1216 
1217 	if (!wcid || !wcid->sta)
1218 		return;
1219 
1220 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1221 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1222 }
1223 
1224 static void
1225 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1226 {
1227 	struct mt76_wcid *wcid;
1228 	int wcid_idx;
1229 
1230 	if (!dev->rx_ampdu_len)
1231 		return;
1232 
1233 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1234 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1235 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1236 	else
1237 		wcid = NULL;
1238 	dev->rx_ampdu_status.wcid = wcid;
1239 
1240 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1241 
1242 	dev->rx_ampdu_len = 0;
1243 	dev->rx_ampdu_ref = 0;
1244 }
1245 
1246 static void
1247 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1248 {
1249 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1250 	struct mt76_wcid *wcid = status->wcid;
1251 
1252 	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1253 		return;
1254 
1255 	if (!wcid || !wcid->sta) {
1256 		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1257 
1258 		if (status->flag & RX_FLAG_8023)
1259 			return;
1260 
1261 		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1262 			return;
1263 
1264 		wcid = NULL;
1265 	}
1266 
1267 	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1268 	    status->ampdu_ref != dev->rx_ampdu_ref)
1269 		mt76_airtime_flush_ampdu(dev);
1270 
1271 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1272 		if (!dev->rx_ampdu_len ||
1273 		    status->ampdu_ref != dev->rx_ampdu_ref) {
1274 			dev->rx_ampdu_status = *status;
1275 			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1276 			dev->rx_ampdu_ref = status->ampdu_ref;
1277 		}
1278 
1279 		dev->rx_ampdu_len += skb->len;
1280 		return;
1281 	}
1282 
1283 	mt76_airtime_report(dev, status, skb->len);
1284 }
1285 
1286 static void
1287 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1288 {
1289 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1290 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1291 	struct ieee80211_sta *sta;
1292 	struct ieee80211_hw *hw;
1293 	struct mt76_wcid *wcid = status->wcid;
1294 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1295 	bool ps;
1296 
1297 	hw = mt76_phy_hw(dev, status->phy_idx);
1298 	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1299 	    !(status->flag & RX_FLAG_8023)) {
1300 		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1301 		if (sta)
1302 			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1303 	}
1304 
1305 	mt76_airtime_check(dev, skb);
1306 
1307 	if (!wcid || !wcid->sta)
1308 		return;
1309 
1310 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1311 
1312 	if (status->signal <= 0)
1313 		ewma_signal_add(&wcid->rssi, -status->signal);
1314 
1315 	wcid->inactive_count = 0;
1316 
1317 	if (status->flag & RX_FLAG_8023)
1318 		return;
1319 
1320 	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1321 		return;
1322 
1323 	if (ieee80211_is_pspoll(hdr->frame_control)) {
1324 		ieee80211_sta_pspoll(sta);
1325 		return;
1326 	}
1327 
1328 	if (ieee80211_has_morefrags(hdr->frame_control) ||
1329 	    !(ieee80211_is_mgmt(hdr->frame_control) ||
1330 	      ieee80211_is_data(hdr->frame_control)))
1331 		return;
1332 
1333 	ps = ieee80211_has_pm(hdr->frame_control);
1334 
1335 	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1336 		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
1337 		ieee80211_sta_uapsd_trigger(sta, tidno);
1338 
1339 	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1340 		return;
1341 
1342 	if (ps)
1343 		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1344 
1345 	if (dev->drv->sta_ps)
1346 		dev->drv->sta_ps(dev, sta, ps);
1347 
1348 	if (!ps)
1349 		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1350 
1351 	ieee80211_sta_ps_transition(sta, ps);
1352 }
1353 
1354 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1355 		      struct napi_struct *napi)
1356 {
1357 	struct ieee80211_sta *sta;
1358 	struct ieee80211_hw *hw;
1359 	struct sk_buff *skb, *tmp;
1360 #if defined(__linux__)
1361 	LIST_HEAD(list);
1362 #elif defined(__FreeBSD__)
1363 	LINUX_LIST_HEAD(list);
1364 #endif
1365 
1366 	spin_lock(&dev->rx_lock);
1367 	while ((skb = __skb_dequeue(frames)) != NULL) {
1368 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1369 
1370 		mt76_check_ccmp_pn(skb);
1371 		skb_shinfo(skb)->frag_list = NULL;
1372 		mt76_rx_convert(dev, skb, &hw, &sta);
1373 		ieee80211_rx_list(hw, sta, skb, &list);
1374 
1375 		/* subsequent amsdu frames */
1376 		while (nskb) {
1377 			skb = nskb;
1378 			nskb = nskb->next;
1379 			skb->next = NULL;
1380 
1381 			mt76_rx_convert(dev, skb, &hw, &sta);
1382 			ieee80211_rx_list(hw, sta, skb, &list);
1383 		}
1384 	}
1385 	spin_unlock(&dev->rx_lock);
1386 
1387 	if (!napi) {
1388 		netif_receive_skb_list(&list);
1389 		return;
1390 	}
1391 
1392 	list_for_each_entry_safe(skb, tmp, &list, list) {
1393 		skb_list_del_init(skb);
1394 		napi_gro_receive(napi, skb);
1395 	}
1396 }
1397 
1398 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1399 			   struct napi_struct *napi)
1400 {
1401 	struct sk_buff_head frames;
1402 	struct sk_buff *skb;
1403 
1404 	__skb_queue_head_init(&frames);
1405 
1406 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1407 		mt76_check_sta(dev, skb);
1408 		if (mtk_wed_device_active(&dev->mmio.wed))
1409 			__skb_queue_tail(&frames, skb);
1410 		else
1411 			mt76_rx_aggr_reorder(skb, &frames);
1412 	}
1413 
1414 	mt76_rx_complete(dev, &frames, napi);
1415 }
1416 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1417 
1418 static int
1419 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1420 	     struct ieee80211_sta *sta)
1421 {
1422 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1423 	struct mt76_dev *dev = phy->dev;
1424 	int ret;
1425 	int i;
1426 
1427 	mutex_lock(&dev->mutex);
1428 
1429 	ret = dev->drv->sta_add(dev, vif, sta);
1430 	if (ret)
1431 		goto out;
1432 
1433 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1434 		struct mt76_txq *mtxq;
1435 
1436 		if (!sta->txq[i])
1437 			continue;
1438 
1439 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1440 		mtxq->wcid = wcid->idx;
1441 	}
1442 
1443 	ewma_signal_init(&wcid->rssi);
1444 	if (phy->band_idx == MT_BAND1)
1445 		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1446 	wcid->phy_idx = phy->band_idx;
1447 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1448 
1449 	mt76_packet_id_init(wcid);
1450 out:
1451 	mutex_unlock(&dev->mutex);
1452 
1453 	return ret;
1454 }
1455 
1456 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1457 		       struct ieee80211_sta *sta)
1458 {
1459 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1460 	int i, idx = wcid->idx;
1461 
1462 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1463 		mt76_rx_aggr_stop(dev, wcid, i);
1464 
1465 	if (dev->drv->sta_remove)
1466 		dev->drv->sta_remove(dev, vif, sta);
1467 
1468 	mt76_packet_id_flush(dev, wcid);
1469 
1470 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1471 	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1472 }
1473 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1474 
1475 static void
1476 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1477 		struct ieee80211_sta *sta)
1478 {
1479 	mutex_lock(&dev->mutex);
1480 	__mt76_sta_remove(dev, vif, sta);
1481 	mutex_unlock(&dev->mutex);
1482 }
1483 
1484 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1485 		   struct ieee80211_sta *sta,
1486 		   enum ieee80211_sta_state old_state,
1487 		   enum ieee80211_sta_state new_state)
1488 {
1489 	struct mt76_phy *phy = hw->priv;
1490 	struct mt76_dev *dev = phy->dev;
1491 
1492 	if (old_state == IEEE80211_STA_NOTEXIST &&
1493 	    new_state == IEEE80211_STA_NONE)
1494 		return mt76_sta_add(phy, vif, sta);
1495 
1496 	if (old_state == IEEE80211_STA_AUTH &&
1497 	    new_state == IEEE80211_STA_ASSOC &&
1498 	    dev->drv->sta_assoc)
1499 		dev->drv->sta_assoc(dev, vif, sta);
1500 
1501 	if (old_state == IEEE80211_STA_NONE &&
1502 	    new_state == IEEE80211_STA_NOTEXIST)
1503 		mt76_sta_remove(dev, vif, sta);
1504 
1505 	return 0;
1506 }
1507 EXPORT_SYMBOL_GPL(mt76_sta_state);
1508 
1509 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1510 			     struct ieee80211_sta *sta)
1511 {
1512 	struct mt76_phy *phy = hw->priv;
1513 	struct mt76_dev *dev = phy->dev;
1514 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1515 
1516 	mutex_lock(&dev->mutex);
1517 	spin_lock_bh(&dev->status_lock);
1518 	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1519 	spin_unlock_bh(&dev->status_lock);
1520 	mutex_unlock(&dev->mutex);
1521 }
1522 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1523 
1524 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1525 		     int *dbm)
1526 {
1527 	struct mt76_phy *phy = hw->priv;
1528 	int n_chains = hweight8(phy->antenna_mask);
1529 	int delta = mt76_tx_power_nss_delta(n_chains);
1530 
1531 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1532 
1533 	return 0;
1534 }
1535 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1536 
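/*
 * SAR power limits: cfg80211 passes per-frequency-range limits in
 * 0.25 dBm steps; they are converted to 0.5 dBm steps (power >> 1),
 * stored in phy->frp and later applied by mt76_get_sar_power(), which
 * clamps the requested channel power to the configured limit.
 */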
1537 int mt76_init_sar_power(struct ieee80211_hw *hw,
1538 			const struct cfg80211_sar_specs *sar)
1539 {
1540 	struct mt76_phy *phy = hw->priv;
1541 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1542 	int i;
1543 
1544 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1545 		return -EINVAL;
1546 
1547 	for (i = 0; i < sar->num_sub_specs; i++) {
1548 		u32 index = sar->sub_specs[i].freq_range_index;
1549 		/* SAR specifies the power limitation in 0.25 dBm steps */
1550 		s32 power = sar->sub_specs[i].power >> 1;
1551 
1552 		if (power > 127 || power < -127)
1553 			power = 127;
1554 
1555 		phy->frp[index].range = &capa->freq_ranges[index];
1556 		phy->frp[index].power = power;
1557 	}
1558 
1559 	return 0;
1560 }
1561 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1562 
1563 int mt76_get_sar_power(struct mt76_phy *phy,
1564 		       struct ieee80211_channel *chan,
1565 		       int power)
1566 {
1567 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1568 	int freq, i;
1569 
1570 	if (!capa || !phy->frp)
1571 		return power;
1572 
1573 	if (power > 127 || power < -127)
1574 		power = 127;
1575 
1576 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1577 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1578 		if (phy->frp[i].range &&
1579 		    freq >= phy->frp[i].range->start_freq &&
1580 		    freq < phy->frp[i].range->end_freq) {
1581 			power = min_t(int, phy->frp[i].power, power);
1582 			break;
1583 		}
1584 	}
1585 
1586 	return power;
1587 }
1588 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1589 
1590 static void
1591 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1592 {
1593 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1594 		ieee80211_csa_finish(vif);
1595 }
1596 
1597 void mt76_csa_finish(struct mt76_dev *dev)
1598 {
1599 	if (!dev->csa_complete)
1600 		return;
1601 
1602 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1603 		IEEE80211_IFACE_ITER_RESUME_ALL,
1604 		__mt76_csa_finish, dev);
1605 
1606 	dev->csa_complete = 0;
1607 }
1608 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1609 
1610 static void
1611 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1612 {
1613 	struct mt76_dev *dev = priv;
1614 
1615 	if (!vif->bss_conf.csa_active)
1616 		return;
1617 
1618 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1619 }
1620 
1621 void mt76_csa_check(struct mt76_dev *dev)
1622 {
1623 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1624 		IEEE80211_IFACE_ITER_RESUME_ALL,
1625 		__mt76_csa_check, dev);
1626 }
1627 EXPORT_SYMBOL_GPL(mt76_csa_check);
1628 
1629 int
1630 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1631 {
1632 	return 0;
1633 }
1634 EXPORT_SYMBOL_GPL(mt76_set_tim);
1635 
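/*
 * Re-insert the 8-byte CCMP header that the hardware stripped on RX,
 * rebuilding it from the PN saved in status->iv:
 *
 *	PN0 PN1 rsvd [ExtIV | key_id << 6] PN2 PN3 PN4 PN5
 *
 * and clear RX_FLAG_IV_STRIPPED so mac80211 treats the IV as present.
 */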
1636 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1637 {
1638 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1639 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1640 	u8 *hdr, *pn = status->iv;
1641 
1642 	__skb_push(skb, 8);
1643 	memmove(skb->data, skb->data + 8, hdr_len);
1644 	hdr = skb->data + hdr_len;
1645 
1646 	hdr[0] = pn[5];
1647 	hdr[1] = pn[4];
1648 	hdr[2] = 0;
1649 	hdr[3] = 0x20 | (key_id << 6);
1650 	hdr[4] = pn[3];
1651 	hdr[5] = pn[2];
1652 	hdr[6] = pn[1];
1653 	hdr[7] = pn[0];
1654 
1655 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1656 }
1657 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1658 
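/*
 * Map a hardware rate index back to an index into the sband bitrate
 * table.  For CCK the short preamble bit (BIT(2)) is masked out first;
 * for OFDM on 2.4 GHz the search skips the four leading CCK entries.
 */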
1659 int mt76_get_rate(struct mt76_dev *dev,
1660 		  struct ieee80211_supported_band *sband,
1661 		  int idx, bool cck)
1662 {
1663 	int i, offset = 0, len = sband->n_bitrates;
1664 
1665 	if (cck) {
1666 		if (sband != &dev->phy.sband_2g.sband)
1667 			return 0;
1668 
1669 		idx &= ~BIT(2); /* short preamble */
1670 	} else if (sband == &dev->phy.sband_2g.sband) {
1671 		offset = 4;
1672 	}
1673 
1674 	for (i = offset; i < len; i++) {
1675 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1676 			return i;
1677 	}
1678 
1679 	return 0;
1680 }
1681 EXPORT_SYMBOL_GPL(mt76_get_rate);
1682 
1683 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1684 		  const u8 *mac)
1685 {
1686 	struct mt76_phy *phy = hw->priv;
1687 
1688 	set_bit(MT76_SCANNING, &phy->state);
1689 }
1690 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1691 
1692 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1693 {
1694 	struct mt76_phy *phy = hw->priv;
1695 
1696 	clear_bit(MT76_SCANNING, &phy->state);
1697 }
1698 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1699 
1700 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1701 {
1702 	struct mt76_phy *phy = hw->priv;
1703 	struct mt76_dev *dev = phy->dev;
1704 
1705 	mutex_lock(&dev->mutex);
1706 	*tx_ant = phy->antenna_mask;
1707 	*rx_ant = phy->antenna_mask;
1708 	mutex_unlock(&dev->mutex);
1709 
1710 	return 0;
1711 }
1712 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1713 
1714 struct mt76_queue *
1715 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1716 		int ring_base, u32 flags)
1717 {
1718 	struct mt76_queue *hwq;
1719 	int err;
1720 
1721 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1722 	if (!hwq)
1723 		return ERR_PTR(-ENOMEM);
1724 
1725 	hwq->flags = flags;
1726 
1727 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1728 	if (err < 0)
1729 		return ERR_PTR(err);
1730 
1731 	return hwq;
1732 }
1733 EXPORT_SYMBOL_GPL(mt76_init_queue);
1734 
1735 u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
1736 {
1737 	int offset = 0;
1738 
1739 	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
1740 		offset = 4;
1741 
1742 	/* pick the lowest rate for hidden nodes */
1743 	if (rateidx < 0)
1744 		rateidx = 0;
1745 
1746 	rateidx += offset;
1747 	if (rateidx >= ARRAY_SIZE(mt76_rates))
1748 		rateidx = offset;
1749 
1750 	return mt76_rates[rateidx].hw_value;
1751 }
1752 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1753 
1754 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1755 			 struct mt76_sta_stats *stats, bool eht)
1756 {
1757 	int i, ei = wi->initial_stat_idx;
1758 	u64 *data = wi->data;
1759 
1760 	wi->sta_count++;
1761 
1762 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1763 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1764 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1765 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1766 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1767 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1768 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1769 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1770 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1771 	if (eht) {
1772 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
1773 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
1774 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
1775 	}
1776 
1777 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
1778 		data[ei++] += stats->tx_bw[i];
1779 
1780 	for (i = 0; i < (eht ? 14 : 12); i++)
1781 		data[ei++] += stats->tx_mcs[i];
1782 
1783 	for (i = 0; i < 4; i++)
1784 		data[ei++] += stats->tx_nss[i];
1785 
1786 	wi->worker_stat_count = ei - wi->initial_stat_idx;
1787 }
1788 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1789 
1790 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
1791 {
1792 #ifdef CONFIG_PAGE_POOL_STATS
1793 	struct page_pool_stats stats = {};
1794 	int i;
1795 
1796 	mt76_for_each_q_rx(dev, i)
1797 		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
1798 
1799 	page_pool_ethtool_stats_get(data, &stats);
1800 	*index += page_pool_ethtool_stats_get_count();
1801 #endif
1802 }
1803 EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1804 
1805 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1806 {
1807 	struct ieee80211_hw *hw = phy->hw;
1808 	struct mt76_dev *dev = phy->dev;
1809 
1810 	if (dev->region == NL80211_DFS_UNSET ||
1811 	    test_bit(MT76_SCANNING, &phy->state))
1812 		return MT_DFS_STATE_DISABLED;
1813 
1814 	if (!hw->conf.radar_enabled) {
1815 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1816 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1817 			return MT_DFS_STATE_ACTIVE;
1818 
1819 		return MT_DFS_STATE_DISABLED;
1820 	}
1821 
1822 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1823 		return MT_DFS_STATE_CAC;
1824 
1825 	return MT_DFS_STATE_ACTIVE;
1826 }
1827 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1828