xref: /linux/drivers/net/wireless/mediatek/mt76/mac80211.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
/* Initializers for struct ieee80211_channel table entries.
 * _idx is the 802.11 channel number (stored as hw_value), _freq the
 * center frequency in MHz. max_power defaults to 30 dBm.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29 
/* 2.4 GHz channel template (channels 1-14); copied per-phy by
 * mt76_init_sband() so flags can be adjusted independently.
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46 
/* 5 GHz channel template, grouped by frequency range. */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* 5180-5240 MHz */
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	/* 5260-5320 MHz */
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	/* 5500-5720 MHz */
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	/* 5745-5885 MHz */
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80 
/* 6 GHz channel template covering UNII-5 through UNII-8
 * (channels 1-233, 5955-7115 MHz).
 */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146 
/* Throughput-based LED blink table used by the tpt LED trigger created
 * in mt76_led_init(); higher throughput maps to faster blinking.
 * Throughput thresholds are in units of 1024 (kbit/s scaling per the
 * mac80211 tpt trigger convention — see ieee80211_create_tpt_led_trigger).
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159 
/* Default legacy bitrate table: 4 CCK entries followed by 8 OFDM entries.
 * The 5/6 GHz sband setup passes rates + 4 to skip the CCK rates.
 * NOTE(review): first macro argument is presumably the hardware rate index
 * and the second the rate in 100 kbit/s units (CCK_RATE/OFDM_RATE are
 * defined in mt76.h) — confirm against the header.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175 
/* SAR (specific absorption rate) power-limit frequency ranges advertised
 * through mt76_sar_capa; one mt76_freq_range_power entry is allocated per
 * range in mt76_phy_init().
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189 
/* SAR capability descriptor installed on every wiphy in mt76_phy_init(). */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195 
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 	struct device_node *np = dev->dev->of_node;
201 
202 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 		return 0;
204 
205 	np = of_get_child_by_name(np, "led");
206 	if (np) {
207 		if (!of_device_is_available(np)) {
208 			of_node_put(np);
209 			dev_info(dev->dev,
210 				"led registration was explicitly disabled by dts\n");
211 			return 0;
212 		}
213 
214 		if (phy == &dev->phy) {
215 			int led_pin;
216 
217 			if (!of_property_read_u32(np, "led-sources", &led_pin))
218 				phy->leds.pin = led_pin;
219 
220 			phy->leds.al =
221 				of_property_read_bool(np, "led-active-low");
222 		}
223 
224 		of_node_put(np);
225 	}
226 
227 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 		 wiphy_name(hw->wiphy));
229 
230 	phy->leds.cdev.name = phy->leds.name;
231 	phy->leds.cdev.default_trigger =
232 		ieee80211_create_tpt_led_trigger(hw,
233 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 					mt76_tpt_blink,
235 					ARRAY_SIZE(mt76_tpt_blink));
236 
237 	dev_info(dev->dev,
238 		"registering led '%s'\n", phy->leds.name);
239 
240 	return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242 
243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 		return;
247 
248 	led_classdev_unregister(&phy->leds.cdev);
249 }
250 
/* Derive HT (and optionally VHT) stream-dependent capabilities of @sband
 * from the number of chains in phy->antenna_mask: STBC TX support and the
 * RX MCS maps. Called from mt76_init_sband() and mt76_set_stream_caps().
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	/* TX STBC requires at least two chains */
	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one 0xff byte (MCS 0-7) per available spatial stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* 2 bits per NSS: MCS 0-9 for active streams, rest unsupported */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292 
293 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
294 {
295 	if (phy->cap.has_2ghz)
296 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
297 	if (phy->cap.has_5ghz)
298 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
299 	if (phy->cap.has_6ghz)
300 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
301 }
302 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303 
304 static int
305 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
306 		const struct ieee80211_channel *chan, int n_chan,
307 		struct ieee80211_rate *rates, int n_rates,
308 		bool ht, bool vht)
309 {
310 	struct ieee80211_supported_band *sband = &msband->sband;
311 	struct ieee80211_sta_vht_cap *vht_cap;
312 	struct ieee80211_sta_ht_cap *ht_cap;
313 	struct mt76_dev *dev = phy->dev;
314 	void *chanlist;
315 	int size;
316 
317 	size = n_chan * sizeof(*chan);
318 	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
319 	if (!chanlist)
320 		return -ENOMEM;
321 
322 	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
323 				    GFP_KERNEL);
324 	if (!msband->chan)
325 		return -ENOMEM;
326 
327 	sband->channels = chanlist;
328 	sband->n_channels = n_chan;
329 	sband->bitrates = rates;
330 	sband->n_bitrates = n_rates;
331 
332 	if (!ht)
333 		return 0;
334 
335 	ht_cap = &sband->ht_cap;
336 	ht_cap->ht_supported = true;
337 	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
338 		       IEEE80211_HT_CAP_GRN_FLD |
339 		       IEEE80211_HT_CAP_SGI_20 |
340 		       IEEE80211_HT_CAP_SGI_40 |
341 		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
342 
343 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
344 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
345 
346 	mt76_init_stream_cap(phy, sband, vht);
347 
348 	if (!vht)
349 		return 0;
350 
351 	vht_cap = &sband->vht_cap;
352 	vht_cap->vht_supported = true;
353 	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
354 			IEEE80211_VHT_CAP_RXSTBC_1 |
355 			IEEE80211_VHT_CAP_SHORT_GI_80 |
356 			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
357 
358 	return 0;
359 }
360 
361 static int
362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369 			       n_rates, true, false);
370 }
371 
372 static int
373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates, bool vht)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380 			       n_rates, true, vht);
381 }
382 
383 static int
384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 		   int n_rates)
386 {
387 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388 
389 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391 			       n_rates, false, false);
392 }
393 
394 static void
395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 		 enum nl80211_band band)
397 {
398 	struct ieee80211_supported_band *sband = &msband->sband;
399 	bool found = false;
400 	int i;
401 
402 	if (!sband)
403 		return;
404 
405 	for (i = 0; i < sband->n_channels; i++) {
406 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 			continue;
408 
409 		found = true;
410 		break;
411 	}
412 
413 	if (found) {
414 		phy->chandef.chan = &sband->channels[0];
415 		phy->chan_state = &msband->chan[0];
416 		return;
417 	}
418 
419 	sband->n_channels = 0;
420 	phy->hw->wiphy->bands[band] = NULL;
421 }
422 
/* Common per-phy mac80211/cfg80211 initialization shared by
 * mt76_register_phy() and mt76_register_device(): wiphy features/flags,
 * antenna and SAR info, and hardware capability flags.
 *
 * Returns 0 on success, -ENOMEM if the SAR power table cannot be
 * allocated (devm-managed, no cleanup needed by callers).
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* one frequency-range power entry per SAR range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	/* software A-MSDU aggregation only when not offloaded to hw */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
481 
/* Allocate an additional (secondary-band) phy backed by its own
 * ieee80211_hw. The hw private area holds the mt76_phy followed by
 * @size bytes of driver-private data (phy->priv). Returns NULL on
 * allocation failure.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	/* keep the driver-private area 8-byte aligned after the phy */
	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	/* same interface modes as the primary phy (mt76_alloc_device) */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
515 
/* Initialize and register a secondary phy with mac80211: set up sbands
 * for the supported bands, LEDs, apply DT frequency limits, then
 * register the hw and publish the phy in dev->phys[].
 *
 * @rates must start with the 4 CCK entries followed by OFDM entries
 * (5/6 GHz use rates + 4 to skip CCK). Returns 0 or a negative error.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the 4 CCK rates on 5 GHz */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		/* skip the 4 CCK rates on 6 GHz */
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* apply DT limits, then drop bands left with no usable channel */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
564 
565 void mt76_unregister_phy(struct mt76_phy *phy)
566 {
567 	struct mt76_dev *dev = phy->dev;
568 
569 	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
570 		return;
571 
572 	if (IS_ENABLED(CONFIG_MT76_LEDS))
573 		mt76_led_cleanup(phy);
574 	mt76_tx_status_check(dev, true);
575 	ieee80211_unregister_hw(phy->hw);
576 	dev->phys[phy->band_idx] = NULL;
577 }
578 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
579 
580 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
581 {
582 	struct page_pool_params pp_params = {
583 		.order = 0,
584 		.flags = 0,
585 		.nid = NUMA_NO_NODE,
586 		.dev = dev->dma_dev,
587 	};
588 	int idx = q - dev->q_rx;
589 
590 	switch (idx) {
591 	case MT_RXQ_MAIN:
592 	case MT_RXQ_BAND1:
593 	case MT_RXQ_BAND2:
594 		pp_params.pool_size = 256;
595 		break;
596 	default:
597 		pp_params.pool_size = 16;
598 		break;
599 	}
600 
601 	if (mt76_is_mmio(dev)) {
602 		/* rely on page_pool for DMA mapping */
603 		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
604 		pp_params.dma_dir = DMA_FROM_DEVICE;
605 		pp_params.max_len = PAGE_SIZE;
606 		pp_params.offset = 0;
607 	}
608 
609 	q->page_pool = page_pool_create(&pp_params);
610 	if (IS_ERR(q->page_pool)) {
611 		int err = PTR_ERR(q->page_pool);
612 
613 		q->page_pool = NULL;
614 		return err;
615 	}
616 
617 	return 0;
618 }
619 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
620 
/* Allocate the primary mt76_dev together with its ieee80211_hw and
 * band-0 phy, and initialize all common locks, lists, queues and the
 * ordered workqueue. @size is the driver-private area requested from
 * ieee80211_alloc_hw(). Returns NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* may be overridden later by buses with a separate DMA device */
	dev->dma_dev = pdev;

	/* the primary phy is embedded in the device and owns band 0 */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
697 
/* Register the primary phy/device with mac80211: initialize the global
 * wcid and sbands, apply DT frequency limits, set up LEDs, register the
 * hw and start the tx worker at low FIFO priority.
 *
 * @rates must start with the 4 CCK entries followed by OFDM entries
 * (5/6 GHz use rates + 4 to skip CCK). Returns 0 or a negative error.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the 4 CCK rates on 5 GHz */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		/* skip the 4 CCK rates on 6 GHz */
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* apply DT limits, then drop bands left with no usable channel */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
751 
752 void mt76_unregister_device(struct mt76_dev *dev)
753 {
754 	struct ieee80211_hw *hw = dev->hw;
755 
756 	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
757 		return;
758 
759 	if (IS_ENABLED(CONFIG_MT76_LEDS))
760 		mt76_led_cleanup(&dev->phy);
761 	mt76_tx_status_check(dev, true);
762 	mt76_wcid_cleanup(dev, &dev->global_wcid);
763 	ieee80211_unregister_hw(hw);
764 }
765 EXPORT_SYMBOL_GPL(mt76_unregister_device);
766 
767 void mt76_free_device(struct mt76_dev *dev)
768 {
769 	mt76_worker_teardown(&dev->tx_worker);
770 	if (dev->wq) {
771 		destroy_workqueue(dev->wq);
772 		dev->wq = NULL;
773 	}
774 	ieee80211_free_hw(dev->hw);
775 }
776 EXPORT_SYMBOL_GPL(mt76_free_device);
777 
/* Complete the A-MSDU currently buffered for rx queue @q: validate its
 * first subframe and either drop the whole burst or queue it on the
 * per-queue rx_skb list for further processing.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* skip the 8-byte IV when it is still present */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be: spoofed A-MSDU, drop */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
813 
/* Collect consecutive A-MSDU subframes into a frag_list on the head skb
 * and release the aggregate once complete. Non-A-MSDU frames pass
 * through as a single-frame "burst".
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* flush a pending A-MSDU when this frame cannot continue it */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new burst; tail points into head's frag_list */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* append subframe to the current burst */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
836 
/* Driver entry point for received frames: drop when the owning phy is
 * not running, account testmode rx statistics, then feed the frame into
 * the per-queue A-MSDU burst assembly.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
858 
859 bool mt76_has_tx_pending(struct mt76_phy *phy)
860 {
861 	struct mt76_queue *q;
862 	int i;
863 
864 	for (i = 0; i < __MT_TXQ_MAX; i++) {
865 		q = phy->q_tx[i];
866 		if (q && q->queued)
867 			return true;
868 	}
869 
870 	return false;
871 }
872 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
873 
874 static struct mt76_channel_state *
875 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
876 {
877 	struct mt76_sband *msband;
878 	int idx;
879 
880 	if (c->band == NL80211_BAND_2GHZ)
881 		msband = &phy->sband_2g;
882 	else if (c->band == NL80211_BAND_6GHZ)
883 		msband = &phy->sband_6g;
884 	else
885 		msband = &phy->sband_5g;
886 
887 	idx = c - &msband->sband.channels[0];
888 	return &msband->chan[idx];
889 }
890 
891 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
892 {
893 	struct mt76_channel_state *state = phy->chan_state;
894 
895 	state->cc_active += ktime_to_us(ktime_sub(time,
896 						  phy->survey_time));
897 	phy->survey_time = time;
898 }
899 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
900 
901 void mt76_update_survey(struct mt76_phy *phy)
902 {
903 	struct mt76_dev *dev = phy->dev;
904 	ktime_t cur_time;
905 
906 	if (dev->drv->update_survey)
907 		dev->drv->update_survey(phy);
908 
909 	cur_time = ktime_get_boottime();
910 	mt76_update_survey_active_time(phy, cur_time);
911 
912 	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
913 		struct mt76_channel_state *state = phy->chan_state;
914 
915 		spin_lock_bh(&dev->cc_lock);
916 		state->cc_bss_rx += dev->cur_cc_bss_rx;
917 		dev->cur_cc_bss_rx = 0;
918 		spin_unlock_bh(&dev->cc_lock);
919 	}
920 }
921 EXPORT_SYMBOL_GPL(mt76_update_survey);
922 
/* Switch the phy's software channel state to hw->conf.chandef: wait (up
 * to 200 ms) for pending tx to drain, close out survey accounting for
 * the old channel, reset DFS state when the channel or width changed,
 * and track the "main" (non-offchannel) operating channel.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* a different channel or width invalidates any CAC result */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* start fresh survey accounting when tuning away from main_chan */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
948 
/* mac80211 .get_survey callback. The flat @idx spans the 2 GHz, 5 GHz
 * and 6 GHz channel lists in that order; returns -ENOENT once @idx runs
 * past the last channel. Counters are stored in microseconds and
 * reported in milliseconds.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* refresh counters once per survey dump (first index) */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* translate the flat index into (sband, per-band index) */
	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated under cc_lock elsewhere */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1013 
1014 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1015 			 struct ieee80211_key_conf *key)
1016 {
1017 	struct ieee80211_key_seq seq;
1018 	int i;
1019 
1020 	wcid->rx_check_pn = false;
1021 
1022 	if (!key)
1023 		return;
1024 
1025 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1026 		return;
1027 
1028 	wcid->rx_check_pn = true;
1029 
1030 	/* data frame */
1031 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1032 		ieee80211_get_key_rx_seq(key, i, &seq);
1033 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1034 	}
1035 
1036 	/* robust management frame */
1037 	ieee80211_get_key_rx_seq(key, -1, &seq);
1038 	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1039 
1040 }
1041 EXPORT_SYMBOL(mt76_wcid_key_setup);
1042 
1043 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1044 {
1045 	int signal = -128;
1046 	u8 chains;
1047 
1048 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1049 		int cur, diff;
1050 
1051 		cur = *chain_signal;
1052 		if (!(chains & BIT(0)) ||
1053 		    cur > 0)
1054 			continue;
1055 
1056 		if (cur > signal)
1057 			swap(cur, signal);
1058 
1059 		diff = signal - cur;
1060 		if (diff == 0)
1061 			signal += 3;
1062 		else if (diff <= 2)
1063 			signal += 2;
1064 		else if (diff <= 6)
1065 			signal += 1;
1066 	}
1067 
1068 	return signal;
1069 }
1070 EXPORT_SYMBOL(mt76_rx_signal);
1071 
/* Convert the driver's mt76_rx_status (stored in skb->cb) into the
 * mac80211 ieee80211_rx_status in place, and resolve the destination
 * hw and station. The mt76 status is copied out first because the
 * mac80211 status occupies the same skb->cb area and is zeroed here.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	/* snapshot before memset: status aliases skb->cb */
	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	/* EHT and HE rate info share storage; copy the matching variant */
	if (status->encoding == RX_ENC_EHT) {
		status->eht.ru = mstat.eht.ru;
		status->eht.gi = mstat.eht.gi;
	} else {
		status->he_ru = mstat.he_ru;
		status->he_gi = mstat.he_gi;
		status->he_dcm = mstat.he_dcm;
	}
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;
	/* combined per-chain signal overrides the raw value set above */
	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
	if (status->signal <= -128)
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.phy_idx);
}
1122 
/* Software PN replay detection for decrypted frames on stations that have
 * rx_check_pn set.  Frames carrying a stale or repeated PN are marked
 * RX_FLAG_ONLY_MONITOR so mac80211 keeps them off the data path.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	/* only decrypted frames carry a PN worth checking */
	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	/* already flagged for monitor-only delivery */
	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* one replay counter per TID */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* iv is stored big-endian, so memcmp gives numeric PN ordering;
	 * anything not strictly greater than the last seen PN is a replay
	 */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	/* with the IV stripped mac80211 cannot re-check the PN itself */
	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1180 
1181 static void
1182 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1183 		    int len)
1184 {
1185 	struct mt76_wcid *wcid = status->wcid;
1186 	struct ieee80211_rx_status info = {
1187 		.enc_flags = status->enc_flags,
1188 		.rate_idx = status->rate_idx,
1189 		.encoding = status->encoding,
1190 		.band = status->band,
1191 		.nss = status->nss,
1192 		.bw = status->bw,
1193 	};
1194 	struct ieee80211_sta *sta;
1195 	u32 airtime;
1196 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1197 
1198 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1199 	spin_lock(&dev->cc_lock);
1200 	dev->cur_cc_bss_rx += airtime;
1201 	spin_unlock(&dev->cc_lock);
1202 
1203 	if (!wcid || !wcid->sta)
1204 		return;
1205 
1206 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1207 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1208 }
1209 
1210 static void
1211 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1212 {
1213 	struct mt76_wcid *wcid;
1214 	int wcid_idx;
1215 
1216 	if (!dev->rx_ampdu_len)
1217 		return;
1218 
1219 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1220 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1221 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1222 	else
1223 		wcid = NULL;
1224 	dev->rx_ampdu_status.wcid = wcid;
1225 
1226 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1227 
1228 	dev->rx_ampdu_len = 0;
1229 	dev->rx_ampdu_ref = 0;
1230 }
1231 
/* Feed software rx airtime accounting (MT_DRV_SW_RX_AIRTIME drivers only).
 * Subframes of one A-MPDU (same ampdu_ref) are batched in dev->rx_ampdu_*
 * and reported in a single call when the aggregate ends; all other frames
 * are reported immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* unknown sender: only account frames addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a non-aggregated frame or a new aggregate ends the pending batch */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* start a new batch; 0xff marks "no wcid" */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1271 
/* Per-frame station bookkeeping on the rx path: resolve the wcid for
 * PS-poll frames, feed airtime accounting, update the RSSI average and
 * track powersave transitions for stations using software PS tracking
 * (MT_WCID_FLAG_CHECK_PS).
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-poll may arrive without a resolved wcid; look the station up
	 * by its transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* the ewma stores the negated (positive) signal value */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* ignore non-first fragments and frames that are neither mgmt nor
	 * data: their PM bit is not meaningful for PS tracking
	 */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* the flag is set before the driver callback when entering PS and
	 * cleared after it when leaving PS
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1339 
1340 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1341 		      struct napi_struct *napi)
1342 {
1343 	struct ieee80211_sta *sta;
1344 	struct ieee80211_hw *hw;
1345 	struct sk_buff *skb, *tmp;
1346 	LIST_HEAD(list);
1347 
1348 	spin_lock(&dev->rx_lock);
1349 	while ((skb = __skb_dequeue(frames)) != NULL) {
1350 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1351 
1352 		mt76_check_ccmp_pn(skb);
1353 		skb_shinfo(skb)->frag_list = NULL;
1354 		mt76_rx_convert(dev, skb, &hw, &sta);
1355 		ieee80211_rx_list(hw, sta, skb, &list);
1356 
1357 		/* subsequent amsdu frames */
1358 		while (nskb) {
1359 			skb = nskb;
1360 			nskb = nskb->next;
1361 			skb->next = NULL;
1362 
1363 			mt76_rx_convert(dev, skb, &hw, &sta);
1364 			ieee80211_rx_list(hw, sta, skb, &list);
1365 		}
1366 	}
1367 	spin_unlock(&dev->rx_lock);
1368 
1369 	if (!napi) {
1370 		netif_receive_skb_list(&list);
1371 		return;
1372 	}
1373 
1374 	list_for_each_entry_safe(skb, tmp, &list, list) {
1375 		skb_list_del_init(skb);
1376 		napi_gro_receive(napi, skb);
1377 	}
1378 }
1379 
1380 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1381 			   struct napi_struct *napi)
1382 {
1383 	struct sk_buff_head frames;
1384 	struct sk_buff *skb;
1385 
1386 	__skb_queue_head_init(&frames);
1387 
1388 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1389 		mt76_check_sta(dev, skb);
1390 		if (mtk_wed_device_active(&dev->mmio.wed))
1391 			__skb_queue_tail(&frames, skb);
1392 		else
1393 			mt76_rx_aggr_reorder(skb, &frames);
1394 	}
1395 
1396 	mt76_rx_complete(dev, &frames, napi);
1397 }
1398 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1399 
1400 static int
1401 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1402 	     struct ieee80211_sta *sta)
1403 {
1404 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1405 	struct mt76_dev *dev = phy->dev;
1406 	int ret;
1407 	int i;
1408 
1409 	mutex_lock(&dev->mutex);
1410 
1411 	ret = dev->drv->sta_add(dev, vif, sta);
1412 	if (ret)
1413 		goto out;
1414 
1415 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1416 		struct mt76_txq *mtxq;
1417 
1418 		if (!sta->txq[i])
1419 			continue;
1420 
1421 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1422 		mtxq->wcid = wcid->idx;
1423 	}
1424 
1425 	ewma_signal_init(&wcid->rssi);
1426 	if (phy->band_idx == MT_BAND1)
1427 		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1428 	wcid->phy_idx = phy->band_idx;
1429 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1430 
1431 	mt76_wcid_init(wcid);
1432 out:
1433 	mutex_unlock(&dev->mutex);
1434 
1435 	return ret;
1436 }
1437 
1438 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1439 		       struct ieee80211_sta *sta)
1440 {
1441 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1442 	int i, idx = wcid->idx;
1443 
1444 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1445 		mt76_rx_aggr_stop(dev, wcid, i);
1446 
1447 	if (dev->drv->sta_remove)
1448 		dev->drv->sta_remove(dev, vif, sta);
1449 
1450 	mt76_wcid_cleanup(dev, wcid);
1451 
1452 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1453 	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1454 }
1455 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1456 
/* Station removal serialized against other station ops by dev->mutex. */
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}
1465 
1466 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1467 		   struct ieee80211_sta *sta,
1468 		   enum ieee80211_sta_state old_state,
1469 		   enum ieee80211_sta_state new_state)
1470 {
1471 	struct mt76_phy *phy = hw->priv;
1472 	struct mt76_dev *dev = phy->dev;
1473 
1474 	if (old_state == IEEE80211_STA_NOTEXIST &&
1475 	    new_state == IEEE80211_STA_NONE)
1476 		return mt76_sta_add(phy, vif, sta);
1477 
1478 	if (old_state == IEEE80211_STA_AUTH &&
1479 	    new_state == IEEE80211_STA_ASSOC &&
1480 	    dev->drv->sta_assoc)
1481 		dev->drv->sta_assoc(dev, vif, sta);
1482 
1483 	if (old_state == IEEE80211_STA_NONE &&
1484 	    new_state == IEEE80211_STA_NOTEXIST)
1485 		mt76_sta_remove(dev, vif, sta);
1486 
1487 	return 0;
1488 }
1489 EXPORT_SYMBOL_GPL(mt76_sta_state);
1490 
/* Called by mac80211 right before the station is freed under RCU: clear
 * the wcid slot so no new references can be taken.  status_lock is held
 * across the clear — NOTE(review): presumably to serialize against the
 * tx-status path looking up the wcid; confirm against that code.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1505 
1506 void mt76_wcid_init(struct mt76_wcid *wcid)
1507 {
1508 	INIT_LIST_HEAD(&wcid->tx_list);
1509 	skb_queue_head_init(&wcid->tx_pending);
1510 
1511 	INIT_LIST_HEAD(&wcid->list);
1512 	idr_init(&wcid->pktid);
1513 }
1514 EXPORT_SYMBOL_GPL(mt76_wcid_init);
1515 
/* Release all tx state owned by a wcid: skbs with pending tx status, the
 * packet-id IDR, its entry on the phy tx scheduling list and any frames
 * still sitting in tx_pending.  Collected skbs are freed only after all
 * locks have been dropped.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = dev->phys[wcid->phy_idx];
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* collect every skb with outstanding tx status for this wcid */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* move still-unsent frames onto the local list as well */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1546 
1547 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1548 		     int *dbm)
1549 {
1550 	struct mt76_phy *phy = hw->priv;
1551 	int n_chains = hweight16(phy->chainmask);
1552 	int delta = mt76_tx_power_nss_delta(n_chains);
1553 
1554 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1555 
1556 	return 0;
1557 }
1558 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1559 
1560 int mt76_init_sar_power(struct ieee80211_hw *hw,
1561 			const struct cfg80211_sar_specs *sar)
1562 {
1563 	struct mt76_phy *phy = hw->priv;
1564 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1565 	int i;
1566 
1567 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1568 		return -EINVAL;
1569 
1570 	for (i = 0; i < sar->num_sub_specs; i++) {
1571 		u32 index = sar->sub_specs[i].freq_range_index;
1572 		/* SAR specifies power limitaton in 0.25dbm */
1573 		s32 power = sar->sub_specs[i].power >> 1;
1574 
1575 		if (power > 127 || power < -127)
1576 			power = 127;
1577 
1578 		phy->frp[index].range = &capa->freq_ranges[index];
1579 		phy->frp[index].power = power;
1580 	}
1581 
1582 	return 0;
1583 }
1584 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1585 
1586 int mt76_get_sar_power(struct mt76_phy *phy,
1587 		       struct ieee80211_channel *chan,
1588 		       int power)
1589 {
1590 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1591 	int freq, i;
1592 
1593 	if (!capa || !phy->frp)
1594 		return power;
1595 
1596 	if (power > 127 || power < -127)
1597 		power = 127;
1598 
1599 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1600 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1601 		if (phy->frp[i].range &&
1602 		    freq >= phy->frp[i].range->start_freq &&
1603 		    freq < phy->frp[i].range->end_freq) {
1604 			power = min_t(int, phy->frp[i].power, power);
1605 			break;
1606 		}
1607 	}
1608 
1609 	return power;
1610 }
1611 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1612 
/* Interface iterator callback: complete the channel switch on interfaces
 * with an active CSA whose beacon countdown has reached zero.
 */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_csa_finish(vif);
}
1619 
/* Finish a pending channel switch.  dev->csa_complete is latched by
 * mt76_csa_check() when a beacon countdown completes; reset it once all
 * active interfaces have been handled.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1632 
1633 static void
1634 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1635 {
1636 	struct mt76_dev *dev = priv;
1637 
1638 	if (!vif->bss_conf.csa_active)
1639 		return;
1640 
1641 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1642 }
1643 
/* Poll all active interfaces for a completed CSA beacon countdown and
 * record the result in dev->csa_complete (consumed by mt76_csa_finish).
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1651 
/* No-op set_tim stub shared by the drivers; returning 0 satisfies
 * mac80211's hook.  NOTE(review): TIM updates are presumably handled
 * when the beacon is rebuilt by the individual drivers — confirm.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1658 
/* Rebuild the 8-byte CCMP header in front of the payload for frames whose
 * IV was stripped by hardware.  status->iv holds the PN big-endian
 * (iv[0] = most significant byte); the CCMP header layout is
 * PN0 PN1 rsvd keyid(+ExtIV) PN2 PN3 PN4 PN5.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* open an 8-byte gap between the 802.11 header and the payload */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* ExtIV bit + key id */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	/* the IV is present again */
	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1681 
1682 int mt76_get_rate(struct mt76_dev *dev,
1683 		  struct ieee80211_supported_band *sband,
1684 		  int idx, bool cck)
1685 {
1686 	int i, offset = 0, len = sband->n_bitrates;
1687 
1688 	if (cck) {
1689 		if (sband != &dev->phy.sband_2g.sband)
1690 			return 0;
1691 
1692 		idx &= ~BIT(2); /* short preamble */
1693 	} else if (sband == &dev->phy.sband_2g.sband) {
1694 		offset = 4;
1695 	}
1696 
1697 	for (i = offset; i < len; i++) {
1698 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1699 			return i;
1700 	}
1701 
1702 	return 0;
1703 }
1704 EXPORT_SYMBOL_GPL(mt76_get_rate);
1705 
/* mac80211 sw_scan_start hook: flag the phy as scanning so other paths
 * (e.g. mt76_phy_dfs_state()) can take it into account.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1714 
/* mac80211 sw_scan_complete hook: clear the scanning flag set by
 * mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1722 
1723 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1724 {
1725 	struct mt76_phy *phy = hw->priv;
1726 	struct mt76_dev *dev = phy->dev;
1727 
1728 	mutex_lock(&dev->mutex);
1729 	*tx_ant = phy->antenna_mask;
1730 	*rx_ant = phy->antenna_mask;
1731 	mutex_unlock(&dev->mutex);
1732 
1733 	return 0;
1734 }
1735 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1736 
1737 struct mt76_queue *
1738 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1739 		int ring_base, void *wed, u32 flags)
1740 {
1741 	struct mt76_queue *hwq;
1742 	int err;
1743 
1744 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1745 	if (!hwq)
1746 		return ERR_PTR(-ENOMEM);
1747 
1748 	hwq->flags = flags;
1749 	hwq->wed = wed;
1750 
1751 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1752 	if (err < 0)
1753 		return ERR_PTR(err);
1754 
1755 	return hwq;
1756 }
1757 EXPORT_SYMBOL_GPL(mt76_init_queue);
1758 
1759 u16 mt76_calculate_default_rate(struct mt76_phy *phy,
1760 				struct ieee80211_vif *vif, int rateidx)
1761 {
1762 	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
1763 	struct cfg80211_chan_def *chandef = mvif->ctx ?
1764 					    &mvif->ctx->def :
1765 					    &phy->chandef;
1766 	int offset = 0;
1767 
1768 	if (chandef->chan->band != NL80211_BAND_2GHZ)
1769 		offset = 4;
1770 
1771 	/* pick the lowest rate for hidden nodes */
1772 	if (rateidx < 0)
1773 		rateidx = 0;
1774 
1775 	rateidx += offset;
1776 	if (rateidx >= ARRAY_SIZE(mt76_rates))
1777 		rateidx = offset;
1778 
1779 	return mt76_rates[rateidx].hw_value;
1780 }
1781 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1782 
1783 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1784 			 struct mt76_sta_stats *stats, bool eht)
1785 {
1786 	int i, ei = wi->initial_stat_idx;
1787 	u64 *data = wi->data;
1788 
1789 	wi->sta_count++;
1790 
1791 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1792 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1793 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1794 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1795 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1796 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1797 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1798 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1799 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1800 	if (eht) {
1801 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
1802 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
1803 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
1804 	}
1805 
1806 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
1807 		data[ei++] += stats->tx_bw[i];
1808 
1809 	for (i = 0; i < (eht ? 14 : 12); i++)
1810 		data[ei++] += stats->tx_mcs[i];
1811 
1812 	for (i = 0; i < 4; i++)
1813 		data[ei++] += stats->tx_nss[i];
1814 
1815 	wi->worker_stat_count = ei - wi->initial_stat_idx;
1816 }
1817 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1818 
/* Sum page pool statistics across all rx queues into the ethtool @data
 * array and advance @index past the page-pool counters.  A no-op (index
 * untouched) when CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1833 
1834 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1835 {
1836 	struct ieee80211_hw *hw = phy->hw;
1837 	struct mt76_dev *dev = phy->dev;
1838 
1839 	if (dev->region == NL80211_DFS_UNSET ||
1840 	    test_bit(MT76_SCANNING, &phy->state))
1841 		return MT_DFS_STATE_DISABLED;
1842 
1843 	if (!hw->conf.radar_enabled) {
1844 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1845 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1846 			return MT_DFS_STATE_ACTIVE;
1847 
1848 		return MT_DFS_STATE_DISABLED;
1849 	}
1850 
1851 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1852 		return MT_DFS_STATE_CAC;
1853 
1854 	return MT_DFS_STATE_ACTIVE;
1855 }
1856 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1857 
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
/* Forward tc offload setup requests to the WED device; only supported
 * while WED is active.
 */
int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      struct net_device *netdev, enum tc_setup_type type,
		      void *type_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mtk_wed_device *wed = &phy->dev->mmio.wed;

	if (mtk_wed_device_active(wed))
		return mtk_wed_device_setup_tc(wed, netdev, type, type_data);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(mt76_net_setup_tc);
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
1873