xref: /linux/drivers/net/wireless/mediatek/mt76/mac80211.c (revision 4eca0ef49af9b2b0c52ef2b58e045ab34629796b)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
9 #define CHAN2G(_idx, _freq) {			\
10 	.band = NL80211_BAND_2GHZ,		\
11 	.center_freq = (_freq),			\
12 	.hw_value = (_idx),			\
13 	.max_power = 30,			\
14 }
15 
16 #define CHAN5G(_idx, _freq) {			\
17 	.band = NL80211_BAND_5GHZ,		\
18 	.center_freq = (_freq),			\
19 	.hw_value = (_idx),			\
20 	.max_power = 30,			\
21 }
22 
23 #define CHAN6G(_idx, _freq) {			\
24 	.band = NL80211_BAND_6GHZ,		\
25 	.center_freq = (_freq),			\
26 	.hw_value = (_idx),			\
27 	.max_power = 30,			\
28 }
29 
30 static const struct ieee80211_channel mt76_channels_2ghz[] = {
31 	CHAN2G(1, 2412),
32 	CHAN2G(2, 2417),
33 	CHAN2G(3, 2422),
34 	CHAN2G(4, 2427),
35 	CHAN2G(5, 2432),
36 	CHAN2G(6, 2437),
37 	CHAN2G(7, 2442),
38 	CHAN2G(8, 2447),
39 	CHAN2G(9, 2452),
40 	CHAN2G(10, 2457),
41 	CHAN2G(11, 2462),
42 	CHAN2G(12, 2467),
43 	CHAN2G(13, 2472),
44 	CHAN2G(14, 2484),
45 };
46 
47 static const struct ieee80211_channel mt76_channels_5ghz[] = {
48 	CHAN5G(36, 5180),
49 	CHAN5G(40, 5200),
50 	CHAN5G(44, 5220),
51 	CHAN5G(48, 5240),
52 
53 	CHAN5G(52, 5260),
54 	CHAN5G(56, 5280),
55 	CHAN5G(60, 5300),
56 	CHAN5G(64, 5320),
57 
58 	CHAN5G(100, 5500),
59 	CHAN5G(104, 5520),
60 	CHAN5G(108, 5540),
61 	CHAN5G(112, 5560),
62 	CHAN5G(116, 5580),
63 	CHAN5G(120, 5600),
64 	CHAN5G(124, 5620),
65 	CHAN5G(128, 5640),
66 	CHAN5G(132, 5660),
67 	CHAN5G(136, 5680),
68 	CHAN5G(140, 5700),
69 	CHAN5G(144, 5720),
70 
71 	CHAN5G(149, 5745),
72 	CHAN5G(153, 5765),
73 	CHAN5G(157, 5785),
74 	CHAN5G(161, 5805),
75 	CHAN5G(165, 5825),
76 	CHAN5G(169, 5845),
77 	CHAN5G(173, 5865),
78 	CHAN5G(177, 5885),
79 };
80 
81 static const struct ieee80211_channel mt76_channels_6ghz[] = {
82 	/* UNII-5 */
83 	CHAN6G(1, 5955),
84 	CHAN6G(5, 5975),
85 	CHAN6G(9, 5995),
86 	CHAN6G(13, 6015),
87 	CHAN6G(17, 6035),
88 	CHAN6G(21, 6055),
89 	CHAN6G(25, 6075),
90 	CHAN6G(29, 6095),
91 	CHAN6G(33, 6115),
92 	CHAN6G(37, 6135),
93 	CHAN6G(41, 6155),
94 	CHAN6G(45, 6175),
95 	CHAN6G(49, 6195),
96 	CHAN6G(53, 6215),
97 	CHAN6G(57, 6235),
98 	CHAN6G(61, 6255),
99 	CHAN6G(65, 6275),
100 	CHAN6G(69, 6295),
101 	CHAN6G(73, 6315),
102 	CHAN6G(77, 6335),
103 	CHAN6G(81, 6355),
104 	CHAN6G(85, 6375),
105 	CHAN6G(89, 6395),
106 	CHAN6G(93, 6415),
107 	/* UNII-6 */
108 	CHAN6G(97, 6435),
109 	CHAN6G(101, 6455),
110 	CHAN6G(105, 6475),
111 	CHAN6G(109, 6495),
112 	CHAN6G(113, 6515),
113 	CHAN6G(117, 6535),
114 	/* UNII-7 */
115 	CHAN6G(121, 6555),
116 	CHAN6G(125, 6575),
117 	CHAN6G(129, 6595),
118 	CHAN6G(133, 6615),
119 	CHAN6G(137, 6635),
120 	CHAN6G(141, 6655),
121 	CHAN6G(145, 6675),
122 	CHAN6G(149, 6695),
123 	CHAN6G(153, 6715),
124 	CHAN6G(157, 6735),
125 	CHAN6G(161, 6755),
126 	CHAN6G(165, 6775),
127 	CHAN6G(169, 6795),
128 	CHAN6G(173, 6815),
129 	CHAN6G(177, 6835),
130 	CHAN6G(181, 6855),
131 	CHAN6G(185, 6875),
132 	/* UNII-8 */
133 	CHAN6G(189, 6895),
134 	CHAN6G(193, 6915),
135 	CHAN6G(197, 6935),
136 	CHAN6G(201, 6955),
137 	CHAN6G(205, 6975),
138 	CHAN6G(209, 6995),
139 	CHAN6G(213, 7015),
140 	CHAN6G(217, 7035),
141 	CHAN6G(221, 7055),
142 	CHAN6G(225, 7075),
143 	CHAN6G(229, 7095),
144 	CHAN6G(233, 7115),
145 };
146 
147 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
148 	{ .throughput =   0 * 1024, .blink_time = 334 },
149 	{ .throughput =   1 * 1024, .blink_time = 260 },
150 	{ .throughput =   5 * 1024, .blink_time = 220 },
151 	{ .throughput =  10 * 1024, .blink_time = 190 },
152 	{ .throughput =  20 * 1024, .blink_time = 170 },
153 	{ .throughput =  50 * 1024, .blink_time = 150 },
154 	{ .throughput =  70 * 1024, .blink_time = 130 },
155 	{ .throughput = 100 * 1024, .blink_time = 110 },
156 	{ .throughput = 200 * 1024, .blink_time =  80 },
157 	{ .throughput = 300 * 1024, .blink_time =  50 },
158 };
159 
160 struct ieee80211_rate mt76_rates[] = {
161 	CCK_RATE(0, 10),
162 	CCK_RATE(1, 20),
163 	CCK_RATE(2, 55),
164 	CCK_RATE(3, 110),
165 	OFDM_RATE(11, 60),
166 	OFDM_RATE(15, 90),
167 	OFDM_RATE(10, 120),
168 	OFDM_RATE(14, 180),
169 	OFDM_RATE(9,  240),
170 	OFDM_RATE(13, 360),
171 	OFDM_RATE(8,  480),
172 	OFDM_RATE(12, 540),
173 };
174 EXPORT_SYMBOL_GPL(mt76_rates);
175 
176 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
177 	{ .start_freq = 2402, .end_freq = 2494, },
178 	{ .start_freq = 5150, .end_freq = 5350, },
179 	{ .start_freq = 5350, .end_freq = 5470, },
180 	{ .start_freq = 5470, .end_freq = 5725, },
181 	{ .start_freq = 5725, .end_freq = 5950, },
182 	{ .start_freq = 5945, .end_freq = 6165, },
183 	{ .start_freq = 6165, .end_freq = 6405, },
184 	{ .start_freq = 6405, .end_freq = 6525, },
185 	{ .start_freq = 6525, .end_freq = 6705, },
186 	{ .start_freq = 6705, .end_freq = 6865, },
187 	{ .start_freq = 6865, .end_freq = 7125, },
188 };
189 
190 static const struct cfg80211_sar_capa mt76_sar_capa = {
191 	.type = NL80211_SAR_TYPE_POWER,
192 	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
193 	.freq_ranges = &mt76_sar_freq_ranges[0],
194 };
195 
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 
201 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
202 		return 0;
203 
204 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
205 		 wiphy_name(hw->wiphy));
206 
207 	phy->leds.cdev.name = phy->leds.name;
208 	phy->leds.cdev.default_trigger =
209 		ieee80211_create_tpt_led_trigger(hw,
210 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
211 					mt76_tpt_blink,
212 					ARRAY_SIZE(mt76_tpt_blink));
213 
214 	if (phy == &dev->phy) {
215 		struct device_node *np = dev->dev->of_node;
216 
217 		np = of_get_child_by_name(np, "led");
218 		if (np) {
219 			int led_pin;
220 
221 			if (!of_property_read_u32(np, "led-sources", &led_pin))
222 				phy->leds.pin = led_pin;
223 			phy->leds.al = of_property_read_bool(np,
224 							     "led-active-low");
225 			of_node_put(np);
226 		}
227 	}
228 
229 	return led_classdev_register(dev->dev, &phy->leds.cdev);
230 }
231 
232 static void mt76_led_cleanup(struct mt76_phy *phy)
233 {
234 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
235 		return;
236 
237 	led_classdev_unregister(&phy->leds.cdev);
238 }
239 
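/*
 * Derive HT/VHT stream capabilities from the PHY antenna mask: the stream
 * count is the number of set antenna bits, TX STBC is only advertised with
 * more than one chain, and the HT/VHT RX/TX MCS maps are limited to the
 * supported number of spatial streams.
 */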
240 static void mt76_init_stream_cap(struct mt76_phy *phy,
241 				 struct ieee80211_supported_band *sband,
242 				 bool vht)
243 {
244 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
245 	int i, nstream = hweight8(phy->antenna_mask);
246 	struct ieee80211_sta_vht_cap *vht_cap;
247 	u16 mcs_map = 0;
248 
249 	if (nstream > 1)
250 		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
251 	else
252 		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
253 
254 	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
255 		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
256 
257 	if (!vht)
258 		return;
259 
260 	vht_cap = &sband->vht_cap;
261 	if (nstream > 1)
262 		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
263 	else
264 		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
265 	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
266 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
267 
268 	for (i = 0; i < 8; i++) {
269 		if (i < nstream)
270 			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
271 		else
272 			mcs_map |=
273 				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
274 	}
275 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
276 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
277 	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
278 		vht_cap->vht_mcs.tx_highest |=
279 				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
280 }
281 
282 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
283 {
284 	if (phy->cap.has_2ghz)
285 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
286 	if (phy->cap.has_5ghz)
287 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
288 	if (phy->cap.has_6ghz)
289 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
290 }
291 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
292 
293 static int
294 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
295 		const struct ieee80211_channel *chan, int n_chan,
296 		struct ieee80211_rate *rates, int n_rates,
297 		bool ht, bool vht)
298 {
299 	struct ieee80211_supported_band *sband = &msband->sband;
300 	struct ieee80211_sta_vht_cap *vht_cap;
301 	struct ieee80211_sta_ht_cap *ht_cap;
302 	struct mt76_dev *dev = phy->dev;
303 	void *chanlist;
304 	int size;
305 
306 	size = n_chan * sizeof(*chan);
307 	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
308 	if (!chanlist)
309 		return -ENOMEM;
310 
311 	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
312 				    GFP_KERNEL);
313 	if (!msband->chan)
314 		return -ENOMEM;
315 
316 	sband->channels = chanlist;
317 	sband->n_channels = n_chan;
318 	sband->bitrates = rates;
319 	sband->n_bitrates = n_rates;
320 
321 	if (!ht)
322 		return 0;
323 
324 	ht_cap = &sband->ht_cap;
325 	ht_cap->ht_supported = true;
326 	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
327 		       IEEE80211_HT_CAP_GRN_FLD |
328 		       IEEE80211_HT_CAP_SGI_20 |
329 		       IEEE80211_HT_CAP_SGI_40 |
330 		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
331 
332 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
333 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
334 
335 	mt76_init_stream_cap(phy, sband, vht);
336 
337 	if (!vht)
338 		return 0;
339 
340 	vht_cap = &sband->vht_cap;
341 	vht_cap->vht_supported = true;
342 	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
343 			IEEE80211_VHT_CAP_RXSTBC_1 |
344 			IEEE80211_VHT_CAP_SHORT_GI_80 |
345 			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
346 
347 	return 0;
348 }
349 
350 static int
351 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
352 		   int n_rates)
353 {
354 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
355 
356 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
357 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
358 			       n_rates, true, false);
359 }
360 
361 static int
362 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates, bool vht)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
368 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
369 			       n_rates, true, vht);
370 }
371 
372 static int
373 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
379 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
380 			       n_rates, false, false);
381 }
382 
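/*
 * If every channel in this band ended up disabled (e.g. by the OF frequency
 * limits read just before this runs), remove the band from the wiphy;
 * otherwise seed the default channel and channel state with the band's
 * first channel.
 */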
383 static void
384 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
385 		 enum nl80211_band band)
386 {
387 	struct ieee80211_supported_band *sband = &msband->sband;
388 	bool found = false;
389 	int i;
390 
391 	if (!sband)
392 		return;
393 
394 	for (i = 0; i < sband->n_channels; i++) {
395 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
396 			continue;
397 
398 		found = true;
399 		break;
400 	}
401 
402 	if (found) {
403 		phy->chandef.chan = &sband->channels[0];
404 		phy->chan_state = &msband->chan[0];
405 		return;
406 	}
407 
408 	sband->n_channels = 0;
409 	phy->hw->wiphy->bands[band] = NULL;
410 }
411 
412 static int
413 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
414 {
415 	struct mt76_dev *dev = phy->dev;
416 	struct wiphy *wiphy = hw->wiphy;
417 
418 	INIT_LIST_HEAD(&phy->tx_list);
419 	spin_lock_init(&phy->tx_lock);
420 
421 	SET_IEEE80211_DEV(hw, dev->dev);
422 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
423 
424 	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
425 			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
426 	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
427 			WIPHY_FLAG_SUPPORTS_TDLS |
428 			WIPHY_FLAG_AP_UAPSD;
429 
430 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
431 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
432 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
433 
434 	wiphy->available_antennas_tx = phy->antenna_mask;
435 	wiphy->available_antennas_rx = phy->antenna_mask;
436 
437 	wiphy->sar_capa = &mt76_sar_capa;
438 	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
439 				sizeof(struct mt76_freq_range_power),
440 				GFP_KERNEL);
441 	if (!phy->frp)
442 		return -ENOMEM;
443 
444 	hw->txq_data_size = sizeof(struct mt76_txq);
445 	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
446 
447 	if (!hw->max_tx_fragments)
448 		hw->max_tx_fragments = 16;
449 
450 	ieee80211_hw_set(hw, SIGNAL_DBM);
451 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
452 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
453 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
454 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
455 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
456 	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
457 
458 	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
459 	    hw->max_tx_fragments > 1) {
460 		ieee80211_hw_set(hw, TX_AMSDU);
461 		ieee80211_hw_set(hw, TX_FRAG_LIST);
462 	}
463 
464 	ieee80211_hw_set(hw, MFP_CAPABLE);
465 	ieee80211_hw_set(hw, AP_LINK_PS);
466 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
467 
468 	return 0;
469 }
470 
471 struct mt76_phy *
472 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
473 	       const struct ieee80211_ops *ops, u8 band_idx)
474 {
475 	struct ieee80211_hw *hw;
476 	unsigned int phy_size;
477 	struct mt76_phy *phy;
478 
479 	phy_size = ALIGN(sizeof(*phy), 8);
480 	hw = ieee80211_alloc_hw(size + phy_size, ops);
481 	if (!hw)
482 		return NULL;
483 
484 	phy = hw->priv;
485 	phy->dev = dev;
486 	phy->hw = hw;
487 	phy->priv = hw->priv + phy_size;
488 	phy->band_idx = band_idx;
489 
490 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
491 	hw->wiphy->interface_modes =
492 		BIT(NL80211_IFTYPE_STATION) |
493 		BIT(NL80211_IFTYPE_AP) |
494 #ifdef CONFIG_MAC80211_MESH
495 		BIT(NL80211_IFTYPE_MESH_POINT) |
496 #endif
497 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
498 		BIT(NL80211_IFTYPE_P2P_GO) |
499 		BIT(NL80211_IFTYPE_ADHOC);
500 
501 	return phy;
502 }
503 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
504 
505 int mt76_register_phy(struct mt76_phy *phy, bool vht,
506 		      struct ieee80211_rate *rates, int n_rates)
507 {
508 	int ret;
509 
510 	ret = mt76_phy_init(phy, phy->hw);
511 	if (ret)
512 		return ret;
513 
514 	if (phy->cap.has_2ghz) {
515 		ret = mt76_init_sband_2g(phy, rates, n_rates);
516 		if (ret)
517 			return ret;
518 	}
519 
520 	if (phy->cap.has_5ghz) {
521 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
522 		if (ret)
523 			return ret;
524 	}
525 
526 	if (phy->cap.has_6ghz) {
527 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
528 		if (ret)
529 			return ret;
530 	}
531 
532 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
533 		ret = mt76_led_init(phy);
534 		if (ret)
535 			return ret;
536 	}
537 
538 	wiphy_read_of_freq_limits(phy->hw->wiphy);
539 	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
540 	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
541 	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
542 
543 	ret = ieee80211_register_hw(phy->hw);
544 	if (ret)
545 		return ret;
546 
547 	set_bit(MT76_STATE_REGISTERED, &phy->state);
548 	phy->dev->phys[phy->band_idx] = phy;
549 
550 	return 0;
551 }
552 EXPORT_SYMBOL_GPL(mt76_register_phy);
553 
554 void mt76_unregister_phy(struct mt76_phy *phy)
555 {
556 	struct mt76_dev *dev = phy->dev;
557 
558 	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
559 		return;
560 
561 	if (IS_ENABLED(CONFIG_MT76_LEDS))
562 		mt76_led_cleanup(phy);
563 	mt76_tx_status_check(dev, true);
564 	ieee80211_unregister_hw(phy->hw);
565 	dev->phys[phy->band_idx] = NULL;
566 }
567 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
568 
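/*
 * Allocate a page pool for an RX queue: the data queues (MAIN/BAND1/BAND2)
 * get a larger pool than the remaining queues, and on MMIO devices the pool
 * also takes care of DMA mapping and syncing of the RX buffers.
 */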
569 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
570 {
571 	struct page_pool_params pp_params = {
572 		.order = 0,
573 		.flags = 0,
574 		.nid = NUMA_NO_NODE,
575 		.dev = dev->dma_dev,
576 	};
577 	int idx = q - dev->q_rx;
578 
579 	switch (idx) {
580 	case MT_RXQ_MAIN:
581 	case MT_RXQ_BAND1:
582 	case MT_RXQ_BAND2:
583 		pp_params.pool_size = 256;
584 		break;
585 	default:
586 		pp_params.pool_size = 16;
587 		break;
588 	}
589 
590 	if (mt76_is_mmio(dev)) {
591 		/* rely on page_pool for DMA mapping */
592 		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
593 		pp_params.dma_dir = DMA_FROM_DEVICE;
594 		pp_params.max_len = PAGE_SIZE;
595 		pp_params.offset = 0;
596 	}
597 
598 	q->page_pool = page_pool_create(&pp_params);
599 	if (IS_ERR(q->page_pool)) {
600 		int err = PTR_ERR(q->page_pool);
601 
602 		q->page_pool = NULL;
603 		return err;
604 	}
605 
606 	return 0;
607 }
608 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
609 
610 struct mt76_dev *
611 mt76_alloc_device(struct device *pdev, unsigned int size,
612 		  const struct ieee80211_ops *ops,
613 		  const struct mt76_driver_ops *drv_ops)
614 {
615 	struct ieee80211_hw *hw;
616 	struct mt76_phy *phy;
617 	struct mt76_dev *dev;
618 	int i;
619 
620 	hw = ieee80211_alloc_hw(size, ops);
621 	if (!hw)
622 		return NULL;
623 
624 	dev = hw->priv;
625 	dev->hw = hw;
626 	dev->dev = pdev;
627 	dev->drv = drv_ops;
628 	dev->dma_dev = pdev;
629 
630 	phy = &dev->phy;
631 	phy->dev = dev;
632 	phy->hw = hw;
633 	phy->band_idx = MT_BAND0;
634 	dev->phys[phy->band_idx] = phy;
635 
636 	spin_lock_init(&dev->rx_lock);
637 	spin_lock_init(&dev->lock);
638 	spin_lock_init(&dev->cc_lock);
639 	spin_lock_init(&dev->status_lock);
640 	spin_lock_init(&dev->wed_lock);
641 	mutex_init(&dev->mutex);
642 	init_waitqueue_head(&dev->tx_wait);
643 
644 	skb_queue_head_init(&dev->mcu.res_q);
645 	init_waitqueue_head(&dev->mcu.wait);
646 	mutex_init(&dev->mcu.mutex);
647 	dev->tx_worker.fn = mt76_tx_worker;
648 
649 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
650 	hw->wiphy->interface_modes =
651 		BIT(NL80211_IFTYPE_STATION) |
652 		BIT(NL80211_IFTYPE_AP) |
653 #ifdef CONFIG_MAC80211_MESH
654 		BIT(NL80211_IFTYPE_MESH_POINT) |
655 #endif
656 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
657 		BIT(NL80211_IFTYPE_P2P_GO) |
658 		BIT(NL80211_IFTYPE_ADHOC);
659 
660 	spin_lock_init(&dev->token_lock);
661 	idr_init(&dev->token);
662 
663 	spin_lock_init(&dev->rx_token_lock);
664 	idr_init(&dev->rx_token);
665 
666 	INIT_LIST_HEAD(&dev->wcid_list);
667 	INIT_LIST_HEAD(&dev->sta_poll_list);
668 	spin_lock_init(&dev->sta_poll_lock);
669 
670 	INIT_LIST_HEAD(&dev->txwi_cache);
671 	INIT_LIST_HEAD(&dev->rxwi_cache);
672 	dev->token_size = dev->drv->token_size;
673 
674 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
675 		skb_queue_head_init(&dev->rx_skb[i]);
676 
677 	dev->wq = alloc_ordered_workqueue("mt76", 0);
678 	if (!dev->wq) {
679 		ieee80211_free_hw(hw);
680 		return NULL;
681 	}
682 
683 	return dev;
684 }
685 EXPORT_SYMBOL_GPL(mt76_alloc_device);
686 
687 int mt76_register_device(struct mt76_dev *dev, bool vht,
688 			 struct ieee80211_rate *rates, int n_rates)
689 {
690 	struct ieee80211_hw *hw = dev->hw;
691 	struct mt76_phy *phy = &dev->phy;
692 	int ret;
693 
694 	dev_set_drvdata(dev->dev, dev);
695 	mt76_wcid_init(&dev->global_wcid);
696 	ret = mt76_phy_init(phy, hw);
697 	if (ret)
698 		return ret;
699 
700 	if (phy->cap.has_2ghz) {
701 		ret = mt76_init_sband_2g(phy, rates, n_rates);
702 		if (ret)
703 			return ret;
704 	}
705 
706 	if (phy->cap.has_5ghz) {
707 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
708 		if (ret)
709 			return ret;
710 	}
711 
712 	if (phy->cap.has_6ghz) {
713 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
714 		if (ret)
715 			return ret;
716 	}
717 
718 	wiphy_read_of_freq_limits(hw->wiphy);
719 	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
720 	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
721 	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
722 
723 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
724 		ret = mt76_led_init(phy);
725 		if (ret)
726 			return ret;
727 	}
728 
729 	ret = ieee80211_register_hw(hw);
730 	if (ret)
731 		return ret;
732 
733 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
734 	set_bit(MT76_STATE_REGISTERED, &phy->state);
735 	sched_set_fifo_low(dev->tx_worker.task);
736 
737 	return 0;
738 }
739 EXPORT_SYMBOL_GPL(mt76_register_device);
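
/*
 * Rough usage sketch for a chip driver; mt76_foo_dev, mt76_foo_ops,
 * mt76_foo_drv_ops and mt76_foo_hw_init are placeholder names and error
 * handling is trimmed:
 *
 *	dev = mt76_alloc_device(&pdev->dev, sizeof(struct mt76_foo_dev),
 *				&mt76_foo_ops, &mt76_foo_drv_ops);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = mt76_foo_hw_init(dev);
 *	if (err)
 *		goto out_free;
 *	err = mt76_register_device(dev, true, mt76_rates,
 *				   ARRAY_SIZE(mt76_rates));
 */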
740 
741 void mt76_unregister_device(struct mt76_dev *dev)
742 {
743 	struct ieee80211_hw *hw = dev->hw;
744 
745 	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
746 		return;
747 
748 	if (IS_ENABLED(CONFIG_MT76_LEDS))
749 		mt76_led_cleanup(&dev->phy);
750 	mt76_tx_status_check(dev, true);
751 	mt76_wcid_cleanup(dev, &dev->global_wcid);
752 	ieee80211_unregister_hw(hw);
753 }
754 EXPORT_SYMBOL_GPL(mt76_unregister_device);
755 
756 void mt76_free_device(struct mt76_dev *dev)
757 {
758 	mt76_worker_teardown(&dev->tx_worker);
759 	if (dev->wq) {
760 		destroy_workqueue(dev->wq);
761 		dev->wq = NULL;
762 	}
763 	ieee80211_free_hw(dev->hw);
764 }
765 EXPORT_SYMBOL_GPL(mt76_free_device);
766 
767 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
768 {
769 	struct sk_buff *skb = phy->rx_amsdu[q].head;
770 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
771 	struct mt76_dev *dev = phy->dev;
772 
773 	phy->rx_amsdu[q].head = NULL;
774 	phy->rx_amsdu[q].tail = NULL;
775 
776 	/*
777 	 * Validate that the A-MSDU has a proper first subframe.
778 	 * A single MSDU can be parsed as an A-MSDU when the unauthenticated A-MSDU
779 	 * flag in the QoS header gets flipped. In that case, the first
780 	 * subframe carries an LLC/SNAP header in the location of the destination
781 	 * address.
782 	 */
783 	if (skb_shinfo(skb)->frag_list) {
784 		int offset = 0;
785 
786 		if (!(status->flag & RX_FLAG_8023)) {
787 			offset = ieee80211_get_hdrlen_from_skb(skb);
788 
789 			if ((status->flag &
790 			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
791 			    RX_FLAG_DECRYPTED)
792 				offset += 8;
793 		}
794 
795 		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
796 			dev_kfree_skb(skb);
797 			return;
798 		}
799 	}
800 	__skb_queue_tail(&dev->rx_skb[q], skb);
801 }
802 
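/*
 * Collect consecutive RX frames that belong to the same hardware-reported
 * A-MSDU (matching sequence number) into a single skb via its frag_list,
 * and release the aggregate once the last subframe or an unrelated frame
 * is seen.
 */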
803 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
804 				  struct sk_buff *skb)
805 {
806 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
807 
808 	if (phy->rx_amsdu[q].head &&
809 	    (!status->amsdu || status->first_amsdu ||
810 	     status->seqno != phy->rx_amsdu[q].seqno))
811 		mt76_rx_release_amsdu(phy, q);
812 
813 	if (!phy->rx_amsdu[q].head) {
814 		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
815 		phy->rx_amsdu[q].seqno = status->seqno;
816 		phy->rx_amsdu[q].head = skb;
817 	} else {
818 		*phy->rx_amsdu[q].tail = skb;
819 		phy->rx_amsdu[q].tail = &skb->next;
820 	}
821 
822 	if (!status->amsdu || status->last_amsdu)
823 		mt76_rx_release_amsdu(phy, q);
824 }
825 
826 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
827 {
828 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
829 	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
830 
831 	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
832 		dev_kfree_skb(skb);
833 		return;
834 	}
835 
836 #ifdef CONFIG_NL80211_TESTMODE
837 	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
838 		phy->test.rx_stats.packets[q]++;
839 		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
840 			phy->test.rx_stats.fcs_error[q]++;
841 	}
842 #endif
843 
844 	mt76_rx_release_burst(phy, q, skb);
845 }
846 EXPORT_SYMBOL_GPL(mt76_rx);
847 
848 bool mt76_has_tx_pending(struct mt76_phy *phy)
849 {
850 	struct mt76_queue *q;
851 	int i;
852 
853 	for (i = 0; i < __MT_TXQ_MAX; i++) {
854 		q = phy->q_tx[i];
855 		if (q && q->queued)
856 			return true;
857 	}
858 
859 	return false;
860 }
861 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
862 
863 static struct mt76_channel_state *
864 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
865 {
866 	struct mt76_sband *msband;
867 	int idx;
868 
869 	if (c->band == NL80211_BAND_2GHZ)
870 		msband = &phy->sband_2g;
871 	else if (c->band == NL80211_BAND_6GHZ)
872 		msband = &phy->sband_6g;
873 	else
874 		msband = &phy->sband_5g;
875 
876 	idx = c - &msband->sband.channels[0];
877 	return &msband->chan[idx];
878 }
879 
880 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
881 {
882 	struct mt76_channel_state *state = phy->chan_state;
883 
884 	state->cc_active += ktime_to_us(ktime_sub(time,
885 						  phy->survey_time));
886 	phy->survey_time = time;
887 }
888 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
889 
890 void mt76_update_survey(struct mt76_phy *phy)
891 {
892 	struct mt76_dev *dev = phy->dev;
893 	ktime_t cur_time;
894 
895 	if (dev->drv->update_survey)
896 		dev->drv->update_survey(phy);
897 
898 	cur_time = ktime_get_boottime();
899 	mt76_update_survey_active_time(phy, cur_time);
900 
901 	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
902 		struct mt76_channel_state *state = phy->chan_state;
903 
904 		spin_lock_bh(&dev->cc_lock);
905 		state->cc_bss_rx += dev->cur_cc_bss_rx;
906 		dev->cur_cc_bss_rx = 0;
907 		spin_unlock_bh(&dev->cc_lock);
908 	}
909 }
910 EXPORT_SYMBOL_GPL(mt76_update_survey);
911 
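/*
 * Channel switch bookkeeping: wait up to 200ms for pending TX to drain,
 * fold the elapsed time into the survey counters, reset the DFS state when
 * the control frequency or bandwidth changes, and clear the per-channel
 * counters when tuning away from the main operating channel.
 */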
912 void mt76_set_channel(struct mt76_phy *phy)
913 {
914 	struct mt76_dev *dev = phy->dev;
915 	struct ieee80211_hw *hw = phy->hw;
916 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
917 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
918 	int timeout = HZ / 5;
919 
920 	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
921 	mt76_update_survey(phy);
922 
923 	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
924 	    phy->chandef.width != chandef->width)
925 		phy->dfs_state = MT_DFS_STATE_UNKNOWN;
926 
927 	phy->chandef = *chandef;
928 	phy->chan_state = mt76_channel_state(phy, chandef->chan);
929 
930 	if (!offchannel)
931 		phy->main_chan = chandef->chan;
932 
933 	if (chandef->chan != phy->main_chan)
934 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
935 }
936 EXPORT_SYMBOL_GPL(mt76_set_channel);
937 
938 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
939 		    struct survey_info *survey)
940 {
941 	struct mt76_phy *phy = hw->priv;
942 	struct mt76_dev *dev = phy->dev;
943 	struct mt76_sband *sband;
944 	struct ieee80211_channel *chan;
945 	struct mt76_channel_state *state;
946 	int ret = 0;
947 
948 	mutex_lock(&dev->mutex);
949 	if (idx == 0 && dev->drv->update_survey)
950 		mt76_update_survey(phy);
951 
952 	if (idx >= phy->sband_2g.sband.n_channels +
953 		   phy->sband_5g.sband.n_channels) {
954 		idx -= (phy->sband_2g.sband.n_channels +
955 			phy->sband_5g.sband.n_channels);
956 		sband = &phy->sband_6g;
957 	} else if (idx >= phy->sband_2g.sband.n_channels) {
958 		idx -= phy->sband_2g.sband.n_channels;
959 		sband = &phy->sband_5g;
960 	} else {
961 		sband = &phy->sband_2g;
962 	}
963 
964 	if (idx >= sband->sband.n_channels) {
965 		ret = -ENOENT;
966 		goto out;
967 	}
968 
969 	chan = &sband->sband.channels[idx];
970 	state = mt76_channel_state(phy, chan);
971 
972 	memset(survey, 0, sizeof(*survey));
973 	survey->channel = chan;
974 	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
975 	survey->filled |= dev->drv->survey_flags;
976 	if (state->noise)
977 		survey->filled |= SURVEY_INFO_NOISE_DBM;
978 
979 	if (chan == phy->main_chan) {
980 		survey->filled |= SURVEY_INFO_IN_USE;
981 
982 		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
983 			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
984 	}
985 
986 	survey->time_busy = div_u64(state->cc_busy, 1000);
987 	survey->time_rx = div_u64(state->cc_rx, 1000);
988 	survey->time = div_u64(state->cc_active, 1000);
989 	survey->noise = state->noise;
990 
991 	spin_lock_bh(&dev->cc_lock);
992 	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
993 	survey->time_tx = div_u64(state->cc_tx, 1000);
994 	spin_unlock_bh(&dev->cc_lock);
995 
996 out:
997 	mutex_unlock(&dev->mutex);
998 
999 	return ret;
1000 }
1001 EXPORT_SYMBOL_GPL(mt76_get_survey);
1002 
1003 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1004 			 struct ieee80211_key_conf *key)
1005 {
1006 	struct ieee80211_key_seq seq;
1007 	int i;
1008 
1009 	wcid->rx_check_pn = false;
1010 
1011 	if (!key)
1012 		return;
1013 
1014 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1015 		return;
1016 
1017 	wcid->rx_check_pn = true;
1018 
1019 	/* data frame */
1020 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1021 		ieee80211_get_key_rx_seq(key, i, &seq);
1022 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1023 	}
1024 
1025 	/* robust management frame */
1026 	ieee80211_get_key_rx_seq(key, -1, &seq);
1027 	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1028 
1029 }
1030 EXPORT_SYMBOL(mt76_wcid_key_setup);
1031 
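/*
 * Combine the per-chain RSSI values into a single signal estimate: take the
 * strongest chain and add a small bonus (+3, +2 or +1 dB) for every other
 * chain depending on how close its level is, so multi-chain reception
 * reports a slightly higher signal than the best chain alone. Returns -128
 * if no chain reported a valid (non-positive) value.
 */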
1032 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1033 {
1034 	int signal = -128;
1035 	u8 chains;
1036 
1037 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1038 		int cur, diff;
1039 
1040 		cur = *chain_signal;
1041 		if (!(chains & BIT(0)) ||
1042 		    cur > 0)
1043 			continue;
1044 
1045 		if (cur > signal)
1046 			swap(cur, signal);
1047 
1048 		diff = signal - cur;
1049 		if (diff == 0)
1050 			signal += 3;
1051 		else if (diff <= 2)
1052 			signal += 2;
1053 		else if (diff <= 6)
1054 			signal += 1;
1055 	}
1056 
1057 	return signal;
1058 }
1059 EXPORT_SYMBOL(mt76_rx_signal);
1060 
1061 static void
1062 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1063 		struct ieee80211_hw **hw,
1064 		struct ieee80211_sta **sta)
1065 {
1066 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1067 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1068 	struct mt76_rx_status mstat;
1069 
1070 	mstat = *((struct mt76_rx_status *)skb->cb);
1071 	memset(status, 0, sizeof(*status));
1072 
1073 	status->flag = mstat.flag;
1074 	status->freq = mstat.freq;
1075 	status->enc_flags = mstat.enc_flags;
1076 	status->encoding = mstat.encoding;
1077 	status->bw = mstat.bw;
1078 	if (status->encoding == RX_ENC_EHT) {
1079 		status->eht.ru = mstat.eht.ru;
1080 		status->eht.gi = mstat.eht.gi;
1081 	} else {
1082 		status->he_ru = mstat.he_ru;
1083 		status->he_gi = mstat.he_gi;
1084 		status->he_dcm = mstat.he_dcm;
1085 	}
1086 	status->rate_idx = mstat.rate_idx;
1087 	status->nss = mstat.nss;
1088 	status->band = mstat.band;
1089 	status->signal = mstat.signal;
1090 	status->chains = mstat.chains;
1091 	status->ampdu_reference = mstat.ampdu_ref;
1092 	status->device_timestamp = mstat.timestamp;
1093 	status->mactime = mstat.timestamp;
1094 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1095 	if (status->signal <= -128)
1096 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1097 
1098 	if (ieee80211_is_beacon(hdr->frame_control) ||
1099 	    ieee80211_is_probe_resp(hdr->frame_control))
1100 		status->boottime_ns = ktime_get_boottime_ns();
1101 
1102 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1103 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1104 		     sizeof(mstat.chain_signal));
1105 	memcpy(status->chain_signal, mstat.chain_signal,
1106 	       sizeof(mstat.chain_signal));
1107 
1108 	*sta = wcid_to_sta(mstat.wcid);
1109 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1110 }
1111 
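/*
 * Software CCMP replay protection: compare the received PN against the last
 * PN stored per TID in the wcid (slot IEEE80211_NUM_TIDS is shared by
 * individually addressed robust management frames), mark replays as
 * monitor-only and flag the PN as validated when the IV was stripped.
 */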
1112 static void
1113 mt76_check_ccmp_pn(struct sk_buff *skb)
1114 {
1115 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1116 	struct mt76_wcid *wcid = status->wcid;
1117 	struct ieee80211_hdr *hdr;
1118 	int security_idx;
1119 	int ret;
1120 
1121 	if (!(status->flag & RX_FLAG_DECRYPTED))
1122 		return;
1123 
1124 	if (status->flag & RX_FLAG_ONLY_MONITOR)
1125 		return;
1126 
1127 	if (!wcid || !wcid->rx_check_pn)
1128 		return;
1129 
1130 	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1131 	if (status->flag & RX_FLAG_8023)
1132 		goto skip_hdr_check;
1133 
1134 	hdr = mt76_skb_get_hdr(skb);
1135 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1136 		/*
1137 		 * Validate the first fragment both here and in mac80211;
1138 		 * all further fragments will be validated by mac80211 only.
1139 		 */
1140 		if (ieee80211_is_frag(hdr) &&
1141 		    !ieee80211_is_first_frag(hdr->frame_control))
1142 			return;
1143 	}
1144 
1145 	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1146 	 *
1147 	 * the recipient shall maintain a single replay counter for received
1148 	 * individually addressed robust Management frames that are received
1149 	 * with the To DS subfield equal to 0, [...]
1150 	 */
1151 	if (ieee80211_is_mgmt(hdr->frame_control) &&
1152 	    !ieee80211_has_tods(hdr->frame_control))
1153 		security_idx = IEEE80211_NUM_TIDS;
1154 
1155 skip_hdr_check:
1156 	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
1157 	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1158 		     sizeof(status->iv));
1159 	if (ret <= 0) {
1160 		status->flag |= RX_FLAG_ONLY_MONITOR;
1161 		return;
1162 	}
1163 
1164 	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1165 
1166 	if (status->flag & RX_FLAG_IV_STRIPPED)
1167 		status->flag |= RX_FLAG_PN_VALIDATED;
1168 }
1169 
1170 static void
1171 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1172 		    int len)
1173 {
1174 	struct mt76_wcid *wcid = status->wcid;
1175 	struct ieee80211_rx_status info = {
1176 		.enc_flags = status->enc_flags,
1177 		.rate_idx = status->rate_idx,
1178 		.encoding = status->encoding,
1179 		.band = status->band,
1180 		.nss = status->nss,
1181 		.bw = status->bw,
1182 	};
1183 	struct ieee80211_sta *sta;
1184 	u32 airtime;
1185 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1186 
1187 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1188 	spin_lock(&dev->cc_lock);
1189 	dev->cur_cc_bss_rx += airtime;
1190 	spin_unlock(&dev->cc_lock);
1191 
1192 	if (!wcid || !wcid->sta)
1193 		return;
1194 
1195 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1196 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1197 }
1198 
1199 static void
1200 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1201 {
1202 	struct mt76_wcid *wcid;
1203 	int wcid_idx;
1204 
1205 	if (!dev->rx_ampdu_len)
1206 		return;
1207 
1208 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1209 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1210 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1211 	else
1212 		wcid = NULL;
1213 	dev->rx_ampdu_status.wcid = wcid;
1214 
1215 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1216 
1217 	dev->rx_ampdu_len = 0;
1218 	dev->rx_ampdu_ref = 0;
1219 }
1220 
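/*
 * Software RX airtime accounting (MT_DRV_SW_RX_AIRTIME only): frames from
 * unknown stations are counted only when addressed to our own MAC, A-MPDU
 * subframes are accumulated and reported once per aggregate, everything
 * else is reported immediately.
 */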
1221 static void
1222 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1223 {
1224 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1225 	struct mt76_wcid *wcid = status->wcid;
1226 
1227 	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1228 		return;
1229 
1230 	if (!wcid || !wcid->sta) {
1231 		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1232 
1233 		if (status->flag & RX_FLAG_8023)
1234 			return;
1235 
1236 		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1237 			return;
1238 
1239 		wcid = NULL;
1240 	}
1241 
1242 	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1243 	    status->ampdu_ref != dev->rx_ampdu_ref)
1244 		mt76_airtime_flush_ampdu(dev);
1245 
1246 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1247 		if (!dev->rx_ampdu_len ||
1248 		    status->ampdu_ref != dev->rx_ampdu_ref) {
1249 			dev->rx_ampdu_status = *status;
1250 			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1251 			dev->rx_ampdu_ref = status->ampdu_ref;
1252 		}
1253 
1254 		dev->rx_ampdu_len += skb->len;
1255 		return;
1256 	}
1257 
1258 	mt76_airtime_report(dev, status, skb->len);
1259 }
1260 
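/*
 * Per-frame station bookkeeping on the RX path: resolve the wcid for
 * PS-Poll frames, feed the airtime accounting, update the RSSI average and,
 * for stations with MT_WCID_FLAG_CHECK_PS set, mirror the frame's
 * power-management bit into mac80211's PS state (including U-APSD
 * triggers).
 */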
1261 static void
1262 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1263 {
1264 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1265 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1266 	struct ieee80211_sta *sta;
1267 	struct ieee80211_hw *hw;
1268 	struct mt76_wcid *wcid = status->wcid;
1269 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1270 	bool ps;
1271 
1272 	hw = mt76_phy_hw(dev, status->phy_idx);
1273 	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1274 	    !(status->flag & RX_FLAG_8023)) {
1275 		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1276 		if (sta)
1277 			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1278 	}
1279 
1280 	mt76_airtime_check(dev, skb);
1281 
1282 	if (!wcid || !wcid->sta)
1283 		return;
1284 
1285 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1286 
1287 	if (status->signal <= 0)
1288 		ewma_signal_add(&wcid->rssi, -status->signal);
1289 
1290 	wcid->inactive_count = 0;
1291 
1292 	if (status->flag & RX_FLAG_8023)
1293 		return;
1294 
1295 	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1296 		return;
1297 
1298 	if (ieee80211_is_pspoll(hdr->frame_control)) {
1299 		ieee80211_sta_pspoll(sta);
1300 		return;
1301 	}
1302 
1303 	if (ieee80211_has_morefrags(hdr->frame_control) ||
1304 	    !(ieee80211_is_mgmt(hdr->frame_control) ||
1305 	      ieee80211_is_data(hdr->frame_control)))
1306 		return;
1307 
1308 	ps = ieee80211_has_pm(hdr->frame_control);
1309 
1310 	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1311 		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
1312 		ieee80211_sta_uapsd_trigger(sta, tidno);
1313 
1314 	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1315 		return;
1316 
1317 	if (ps)
1318 		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1319 
1320 	if (dev->drv->sta_ps)
1321 		dev->drv->sta_ps(dev, sta, ps);
1322 
1323 	if (!ps)
1324 		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1325 
1326 	ieee80211_sta_ps_transition(sta, ps);
1327 }
1328 
1329 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1330 		      struct napi_struct *napi)
1331 {
1332 	struct ieee80211_sta *sta;
1333 	struct ieee80211_hw *hw;
1334 	struct sk_buff *skb, *tmp;
1335 	LIST_HEAD(list);
1336 
1337 	spin_lock(&dev->rx_lock);
1338 	while ((skb = __skb_dequeue(frames)) != NULL) {
1339 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1340 
1341 		mt76_check_ccmp_pn(skb);
1342 		skb_shinfo(skb)->frag_list = NULL;
1343 		mt76_rx_convert(dev, skb, &hw, &sta);
1344 		ieee80211_rx_list(hw, sta, skb, &list);
1345 
1346 		/* subsequent amsdu frames */
1347 		while (nskb) {
1348 			skb = nskb;
1349 			nskb = nskb->next;
1350 			skb->next = NULL;
1351 
1352 			mt76_rx_convert(dev, skb, &hw, &sta);
1353 			ieee80211_rx_list(hw, sta, skb, &list);
1354 		}
1355 	}
1356 	spin_unlock(&dev->rx_lock);
1357 
1358 	if (!napi) {
1359 		netif_receive_skb_list(&list);
1360 		return;
1361 	}
1362 
1363 	list_for_each_entry_safe(skb, tmp, &list, list) {
1364 		skb_list_del_init(skb);
1365 		napi_gro_receive(napi, skb);
1366 	}
1367 }
1368 
1369 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1370 			   struct napi_struct *napi)
1371 {
1372 	struct sk_buff_head frames;
1373 	struct sk_buff *skb;
1374 
1375 	__skb_queue_head_init(&frames);
1376 
1377 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1378 		mt76_check_sta(dev, skb);
1379 		if (mtk_wed_device_active(&dev->mmio.wed))
1380 			__skb_queue_tail(&frames, skb);
1381 		else
1382 			mt76_rx_aggr_reorder(skb, &frames);
1383 	}
1384 
1385 	mt76_rx_complete(dev, &frames, napi);
1386 }
1387 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1388 
1389 static int
1390 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1391 	     struct ieee80211_sta *sta)
1392 {
1393 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1394 	struct mt76_dev *dev = phy->dev;
1395 	int ret;
1396 	int i;
1397 
1398 	mutex_lock(&dev->mutex);
1399 
1400 	ret = dev->drv->sta_add(dev, vif, sta);
1401 	if (ret)
1402 		goto out;
1403 
1404 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1405 		struct mt76_txq *mtxq;
1406 
1407 		if (!sta->txq[i])
1408 			continue;
1409 
1410 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1411 		mtxq->wcid = wcid->idx;
1412 	}
1413 
1414 	ewma_signal_init(&wcid->rssi);
1415 	if (phy->band_idx == MT_BAND1)
1416 		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1417 	wcid->phy_idx = phy->band_idx;
1418 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1419 
1420 	mt76_wcid_init(wcid);
1421 out:
1422 	mutex_unlock(&dev->mutex);
1423 
1424 	return ret;
1425 }
1426 
1427 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1428 		       struct ieee80211_sta *sta)
1429 {
1430 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1431 	int i, idx = wcid->idx;
1432 
1433 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1434 		mt76_rx_aggr_stop(dev, wcid, i);
1435 
1436 	if (dev->drv->sta_remove)
1437 		dev->drv->sta_remove(dev, vif, sta);
1438 
1439 	mt76_wcid_cleanup(dev, wcid);
1440 
1441 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1442 	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1443 }
1444 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1445 
1446 static void
1447 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1448 		struct ieee80211_sta *sta)
1449 {
1450 	mutex_lock(&dev->mutex);
1451 	__mt76_sta_remove(dev, vif, sta);
1452 	mutex_unlock(&dev->mutex);
1453 }
1454 
1455 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1456 		   struct ieee80211_sta *sta,
1457 		   enum ieee80211_sta_state old_state,
1458 		   enum ieee80211_sta_state new_state)
1459 {
1460 	struct mt76_phy *phy = hw->priv;
1461 	struct mt76_dev *dev = phy->dev;
1462 
1463 	if (old_state == IEEE80211_STA_NOTEXIST &&
1464 	    new_state == IEEE80211_STA_NONE)
1465 		return mt76_sta_add(phy, vif, sta);
1466 
1467 	if (old_state == IEEE80211_STA_AUTH &&
1468 	    new_state == IEEE80211_STA_ASSOC &&
1469 	    dev->drv->sta_assoc)
1470 		dev->drv->sta_assoc(dev, vif, sta);
1471 
1472 	if (old_state == IEEE80211_STA_NONE &&
1473 	    new_state == IEEE80211_STA_NOTEXIST)
1474 		mt76_sta_remove(dev, vif, sta);
1475 
1476 	return 0;
1477 }
1478 EXPORT_SYMBOL_GPL(mt76_sta_state);
1479 
1480 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1481 			     struct ieee80211_sta *sta)
1482 {
1483 	struct mt76_phy *phy = hw->priv;
1484 	struct mt76_dev *dev = phy->dev;
1485 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1486 
1487 	mutex_lock(&dev->mutex);
1488 	spin_lock_bh(&dev->status_lock);
1489 	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1490 	spin_unlock_bh(&dev->status_lock);
1491 	mutex_unlock(&dev->mutex);
1492 }
1493 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1494 
1495 void mt76_wcid_init(struct mt76_wcid *wcid)
1496 {
1497 	INIT_LIST_HEAD(&wcid->tx_list);
1498 	skb_queue_head_init(&wcid->tx_pending);
1499 
1500 	INIT_LIST_HEAD(&wcid->list);
1501 	idr_init(&wcid->pktid);
1502 }
1503 EXPORT_SYMBOL_GPL(mt76_wcid_init);
1504 
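/*
 * Tear down a wcid: reclaim frames still waiting for TX status, unlink it
 * from the per-phy TX scheduling list and free whatever is left on its
 * pending-TX queue through the owning hw.
 */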
1505 void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
1506 {
1507 	struct mt76_phy *phy = dev->phys[wcid->phy_idx];
1508 	struct ieee80211_hw *hw;
1509 	struct sk_buff_head list;
1510 	struct sk_buff *skb;
1511 
1512 	mt76_tx_status_lock(dev, &list);
1513 	mt76_tx_status_skb_get(dev, wcid, -1, &list);
1514 	mt76_tx_status_unlock(dev, &list);
1515 
1516 	idr_destroy(&wcid->pktid);
1517 
1518 	spin_lock_bh(&phy->tx_lock);
1519 
1520 	if (!list_empty(&wcid->tx_list))
1521 		list_del_init(&wcid->tx_list);
1522 
1523 	spin_lock(&wcid->tx_pending.lock);
1524 	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
1525 	spin_unlock(&wcid->tx_pending.lock);
1526 
1527 	spin_unlock_bh(&phy->tx_lock);
1528 
1529 	while ((skb = __skb_dequeue(&list)) != NULL) {
1530 		hw = mt76_tx_status_get_hw(dev, skb);
1531 		ieee80211_free_txskb(hw, skb);
1532 	}
1533 }
1534 EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1535 
1536 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1537 		     int *dbm)
1538 {
1539 	struct mt76_phy *phy = hw->priv;
1540 	int n_chains = hweight8(phy->antenna_mask);
1541 	int delta = mt76_tx_power_nss_delta(n_chains);
1542 
1543 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1544 
1545 	return 0;
1546 }
1547 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1548 
1549 int mt76_init_sar_power(struct ieee80211_hw *hw,
1550 			const struct cfg80211_sar_specs *sar)
1551 {
1552 	struct mt76_phy *phy = hw->priv;
1553 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1554 	int i;
1555 
1556 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1557 		return -EINVAL;
1558 
1559 	for (i = 0; i < sar->num_sub_specs; i++) {
1560 		u32 index = sar->sub_specs[i].freq_range_index;
1561 		/* SAR specifies the power limitation in 0.25 dBm steps */
1562 		s32 power = sar->sub_specs[i].power >> 1;
1563 
1564 		if (power > 127 || power < -127)
1565 			power = 127;
1566 
1567 		phy->frp[index].range = &capa->freq_ranges[index];
1568 		phy->frp[index].power = power;
1569 	}
1570 
1571 	return 0;
1572 }
1573 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1574 
1575 int mt76_get_sar_power(struct mt76_phy *phy,
1576 		       struct ieee80211_channel *chan,
1577 		       int power)
1578 {
1579 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1580 	int freq, i;
1581 
1582 	if (!capa || !phy->frp)
1583 		return power;
1584 
1585 	if (power > 127 || power < -127)
1586 		power = 127;
1587 
1588 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1589 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1590 		if (phy->frp[i].range &&
1591 		    freq >= phy->frp[i].range->start_freq &&
1592 		    freq < phy->frp[i].range->end_freq) {
1593 			power = min_t(int, phy->frp[i].power, power);
1594 			break;
1595 		}
1596 	}
1597 
1598 	return power;
1599 }
1600 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1601 
1602 static void
1603 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1604 {
1605 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1606 		ieee80211_csa_finish(vif);
1607 }
1608 
1609 void mt76_csa_finish(struct mt76_dev *dev)
1610 {
1611 	if (!dev->csa_complete)
1612 		return;
1613 
1614 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1615 		IEEE80211_IFACE_ITER_RESUME_ALL,
1616 		__mt76_csa_finish, dev);
1617 
1618 	dev->csa_complete = 0;
1619 }
1620 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1621 
1622 static void
1623 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1624 {
1625 	struct mt76_dev *dev = priv;
1626 
1627 	if (!vif->bss_conf.csa_active)
1628 		return;
1629 
1630 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1631 }
1632 
1633 void mt76_csa_check(struct mt76_dev *dev)
1634 {
1635 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1636 		IEEE80211_IFACE_ITER_RESUME_ALL,
1637 		__mt76_csa_check, dev);
1638 }
1639 EXPORT_SYMBOL_GPL(mt76_csa_check);
1640 
1641 int
1642 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1643 {
1644 	return 0;
1645 }
1646 EXPORT_SYMBOL_GPL(mt76_set_tim);
1647 
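/*
 * Re-insert the 8-byte CCMP header (PN plus key id with the Ext IV bit)
 * that the hardware stripped on RX, using the PN saved in the RX status,
 * so that mac80211 sees a frame with an intact IV and RX_FLAG_IV_STRIPPED
 * can be cleared.
 */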
1648 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1649 {
1650 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1651 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1652 	u8 *hdr, *pn = status->iv;
1653 
1654 	__skb_push(skb, 8);
1655 	memmove(skb->data, skb->data + 8, hdr_len);
1656 	hdr = skb->data + hdr_len;
1657 
1658 	hdr[0] = pn[5];
1659 	hdr[1] = pn[4];
1660 	hdr[2] = 0;
1661 	hdr[3] = 0x20 | (key_id << 6);
1662 	hdr[4] = pn[3];
1663 	hdr[5] = pn[2];
1664 	hdr[6] = pn[1];
1665 	hdr[7] = pn[0];
1666 
1667 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1668 }
1669 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1670 
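/*
 * Map a hardware rate index back to an index into the sband bitrate table:
 * CCK rates only exist on 2 GHz (with the short preamble bit masked off),
 * while OFDM lookups on the 2 GHz band skip the four leading CCK entries.
 */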
1671 int mt76_get_rate(struct mt76_dev *dev,
1672 		  struct ieee80211_supported_band *sband,
1673 		  int idx, bool cck)
1674 {
1675 	int i, offset = 0, len = sband->n_bitrates;
1676 
1677 	if (cck) {
1678 		if (sband != &dev->phy.sband_2g.sband)
1679 			return 0;
1680 
1681 		idx &= ~BIT(2); /* short preamble */
1682 	} else if (sband == &dev->phy.sband_2g.sband) {
1683 		offset = 4;
1684 	}
1685 
1686 	for (i = offset; i < len; i++) {
1687 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1688 			return i;
1689 	}
1690 
1691 	return 0;
1692 }
1693 EXPORT_SYMBOL_GPL(mt76_get_rate);
1694 
1695 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1696 		  const u8 *mac)
1697 {
1698 	struct mt76_phy *phy = hw->priv;
1699 
1700 	set_bit(MT76_SCANNING, &phy->state);
1701 }
1702 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1703 
1704 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1705 {
1706 	struct mt76_phy *phy = hw->priv;
1707 
1708 	clear_bit(MT76_SCANNING, &phy->state);
1709 }
1710 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1711 
1712 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1713 {
1714 	struct mt76_phy *phy = hw->priv;
1715 	struct mt76_dev *dev = phy->dev;
1716 
1717 	mutex_lock(&dev->mutex);
1718 	*tx_ant = phy->antenna_mask;
1719 	*rx_ant = phy->antenna_mask;
1720 	mutex_unlock(&dev->mutex);
1721 
1722 	return 0;
1723 }
1724 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1725 
1726 struct mt76_queue *
1727 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1728 		int ring_base, u32 flags)
1729 {
1730 	struct mt76_queue *hwq;
1731 	int err;
1732 
1733 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1734 	if (!hwq)
1735 		return ERR_PTR(-ENOMEM);
1736 
1737 	hwq->flags = flags;
1738 
1739 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1740 	if (err < 0)
1741 		return ERR_PTR(err);
1742 
1743 	return hwq;
1744 }
1745 EXPORT_SYMBOL_GPL(mt76_init_queue);
1746 
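/*
 * Pick the hw_value of a default rate from mt76_rates: skip the four CCK
 * entries outside the 2 GHz band, clamp negative indices (hidden nodes) to
 * the lowest rate and wrap back to the band's lowest rate if the index runs
 * past the end of the table.
 */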
1747 u16 mt76_calculate_default_rate(struct mt76_phy *phy,
1748 				struct ieee80211_vif *vif, int rateidx)
1749 {
1750 	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
1751 	struct cfg80211_chan_def *chandef = mvif->ctx ?
1752 					    &mvif->ctx->def :
1753 					    &phy->chandef;
1754 	int offset = 0;
1755 
1756 	if (chandef->chan->band != NL80211_BAND_2GHZ)
1757 		offset = 4;
1758 
1759 	/* pick the lowest rate for hidden nodes */
1760 	if (rateidx < 0)
1761 		rateidx = 0;
1762 
1763 	rateidx += offset;
1764 	if (rateidx >= ARRAY_SIZE(mt76_rates))
1765 		rateidx = offset;
1766 
1767 	return mt76_rates[rateidx].hw_value;
1768 }
1769 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1770 
1771 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1772 			 struct mt76_sta_stats *stats, bool eht)
1773 {
1774 	int i, ei = wi->initial_stat_idx;
1775 	u64 *data = wi->data;
1776 
1777 	wi->sta_count++;
1778 
1779 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1780 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1781 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1782 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1783 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1784 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1785 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1786 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1787 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1788 	if (eht) {
1789 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
1790 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
1791 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
1792 	}
1793 
1794 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
1795 		data[ei++] += stats->tx_bw[i];
1796 
1797 	for (i = 0; i < (eht ? 14 : 12); i++)
1798 		data[ei++] += stats->tx_mcs[i];
1799 
1800 	for (i = 0; i < 4; i++)
1801 		data[ei++] += stats->tx_nss[i];
1802 
1803 	wi->worker_stat_count = ei - wi->initial_stat_idx;
1804 }
1805 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1806 
1807 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
1808 {
1809 #ifdef CONFIG_PAGE_POOL_STATS
1810 	struct page_pool_stats stats = {};
1811 	int i;
1812 
1813 	mt76_for_each_q_rx(dev, i)
1814 		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
1815 
1816 	page_pool_ethtool_stats_get(data, &stats);
1817 	*index += page_pool_ethtool_stats_get_count();
1818 #endif
1819 }
1820 EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1821 
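/*
 * Work out the current DFS state: disabled while scanning or without a DFS
 * region; with radar detection off, only a monitor interface parked on a
 * radar channel counts as active; otherwise report CAC until regulatory
 * allows beaconing on the current chandef, then active.
 */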
1822 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1823 {
1824 	struct ieee80211_hw *hw = phy->hw;
1825 	struct mt76_dev *dev = phy->dev;
1826 
1827 	if (dev->region == NL80211_DFS_UNSET ||
1828 	    test_bit(MT76_SCANNING, &phy->state))
1829 		return MT_DFS_STATE_DISABLED;
1830 
1831 	if (!hw->conf.radar_enabled) {
1832 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1833 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1834 			return MT_DFS_STATE_ACTIVE;
1835 
1836 		return MT_DFS_STATE_DISABLED;
1837 	}
1838 
1839 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1840 		return MT_DFS_STATE_CAC;
1841 
1842 	return MT_DFS_STATE_ACTIVE;
1843 }
1844 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1845