xref: /linux/drivers/net/wireless/mediatek/mt76/mac80211.c (revision 38a45bead2be0bd481f3143dc4fe451cb9d09823)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
/* Channel table entry initializers for the three supported bands.
 * _idx is the channel number (hw_value), _freq the center frequency in
 * MHz.  max_power is a 30 dBm cap; the effective limit is reduced
 * further by the regulatory domain at runtime.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29 
/* 2.4 GHz band: channels 1-13 at 5 MHz spacing plus channel 14 at
 * 2484 MHz.  Used as a template; mt76_init_sband() makes a per-device
 * copy so regulatory flags can be applied without touching this table.
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46 
/* 5 GHz band template, grouped by U-NII sub-band */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* U-NII-1 */
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	/* U-NII-2A (DFS) */
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	/* U-NII-2C (DFS) */
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	/* U-NII-3 / U-NII-4 */
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80 
/* 6 GHz band template: 20 MHz channels, channel numbers step by 4 */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146 
/* Throughput-to-blink-rate map for the mac80211 tpt LED trigger:
 * higher throughput -> shorter blink interval (blink_time is in ms).
 * Passed to ieee80211_create_tpt_led_trigger() in mt76_led_init().
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159 
/* Shared legacy (non-HT) rate table: 4 CCK rates followed by 8 OFDM
 * rates.  Drivers register either the full table (2.4 GHz) or skip the
 * first 4 CCK entries (5/6 GHz, see mt76_register_device()).
 * NOTE(review): arguments appear to be (hw rate index, bitrate in
 * 100 kbit/s units) — confirm against CCK_RATE/OFDM_RATE in mt76.h.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175 
/* Frequency ranges (MHz) over which userspace may set independent SAR
 * power limits; covers 2.4 GHz, the 5 GHz sub-bands and 6 GHz.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189 
/* SAR capability advertised to cfg80211 (hooked up in mt76_phy_init()) */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195 
/* Register an LED class device for @phy.
 *
 * No-op (returning 0) when the driver installed neither a
 * brightness_set nor a blink_set callback.  An optional "led" DT child
 * node may disable registration entirely, or - for the primary phy
 * only - select the LED pin ("led-sources") and polarity
 * ("led-active-low").
 *
 * Returns 0 on success or when skipped, otherwise the
 * led_classdev_register() error code.
 */
static int mt76_led_init(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct device_node *np = dev->dev->of_node;

	/* Driver provides no LED control callbacks: nothing to register */
	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return 0;

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_device_is_available(np)) {
			/* drop the child-node reference before bailing out */
			of_node_put(np);
			dev_info(dev->dev,
				"led registration was explicitly disabled by dts\n");
			return 0;
		}

		/* Pin/polarity overrides apply to the primary phy only */
		if (phy == &dev->phy) {
			int led_pin;

			if (!of_property_read_u32(np, "led-sources", &led_pin))
				phy->leds.pin = led_pin;

			phy->leds.al =
				of_property_read_bool(np, "led-active-low");
		}

		of_node_put(np);
	}

	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
		 wiphy_name(hw->wiphy));

	phy->leds.cdev.name = phy->leds.name;
	/* blink rate tracks throughput via the mac80211 tpt trigger */
	phy->leds.cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	dev_info(dev->dev,
		"registering led '%s'\n", phy->leds.name);

	return led_classdev_register(dev->dev, &phy->leds.cdev);
}
242 
243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 		return;
247 
248 	led_classdev_unregister(&phy->leds.cdev);
249 }
250 
/* Set spatial-stream dependent HT (and optionally VHT) capabilities on
 * @sband based on the phy's antenna mask.
 *
 * TX STBC is advertised only with more than one stream; the HT MCS RX
 * mask and the VHT MCS maps are filled for exactly the number of
 * streams available.  Safe to call repeatedly (e.g. after an antenna
 * mask change) since the stream-dependent bits are cleared first.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one full MCS octet (MCS 0-7) per supported stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* 2 bits per NSS: MCS 0-9 for available streams, else unsupported */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292 
293 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
294 {
295 	if (phy->cap.has_2ghz)
296 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
297 	if (phy->cap.has_5ghz)
298 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
299 	if (phy->cap.has_6ghz)
300 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
301 }
302 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303 
/* Common per-band setup.
 *
 * Duplicates the const channel template into devm-managed memory (so
 * regulatory flags can be written to it), allocates the matching
 * per-channel state array, and wires up the rate table.  HT and VHT
 * capabilities are filled in when requested.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.  All
 * allocations are device-managed; no explicit cleanup is needed.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* fill stream-count dependent HT/VHT fields */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360 
361 static int
362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369 			       n_rates, true, false);
370 }
371 
372 static int
373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates, bool vht)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380 			       n_rates, true, vht);
381 }
382 
383 static int
384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 		   int n_rates)
386 {
387 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388 
389 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391 			       n_rates, false, false);
392 }
393 
394 static void
395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 		 enum nl80211_band band)
397 {
398 	struct ieee80211_supported_band *sband = &msband->sband;
399 	bool found = false;
400 	int i;
401 
402 	if (!sband)
403 		return;
404 
405 	for (i = 0; i < sband->n_channels; i++) {
406 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 			continue;
408 
409 		found = true;
410 		break;
411 	}
412 
413 	if (found) {
414 		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
415 					NL80211_CHAN_HT20);
416 		phy->chan_state = &msband->chan[0];
417 		phy->dev->band_phys[band] = phy;
418 		return;
419 	}
420 
421 	sband->n_channels = 0;
422 	if (phy->hw->wiphy->bands[band] == sband)
423 		phy->hw->wiphy->bands[band] = NULL;
424 }
425 
/* Common wiphy/ieee80211_hw setup shared by the primary phy and any
 * extra radio phys.
 *
 * Always initializes the phy's TX bookkeeping; the wiphy/hw feature
 * flags are applied only for the phy that owns hw->priv (radio phys
 * allocated by mt76_alloc_radio_phy() share an hw that has already
 * been set up).
 *
 * Returns 0 on success, -ENOMEM if the SAR power table allocation
 * fails.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);

	/* hw-level setup only for the phy embedded in hw->priv */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* per-range SAR power limits, settable from userspace */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* software A-MSDU aggregation only makes sense when the device
	 * does not offload it and can take fragment lists
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
488 
489 struct mt76_phy *
490 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
491 		     u8 band_idx)
492 {
493 	struct ieee80211_hw *hw = dev->phy.hw;
494 	unsigned int phy_size;
495 	struct mt76_phy *phy;
496 
497 	phy_size = ALIGN(sizeof(*phy), 8);
498 	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
499 	if (!phy)
500 		return NULL;
501 
502 	phy->dev = dev;
503 	phy->hw = hw;
504 	phy->priv = (void *)phy + phy_size;
505 	phy->band_idx = band_idx;
506 
507 	return phy;
508 }
509 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
510 
/* Allocate a secondary phy with its own ieee80211_hw (plus @size bytes
 * of driver-private data behind the phy struct).
 *
 * Returns NULL on allocation failure.  The caller must eventually free
 * the hw with ieee80211_free_hw().
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	/* driver-private data follows the aligned phy struct */
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	/* same interface modes as the primary phy in mt76_alloc_device() */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
544 
/* Register a (secondary or radio) phy with mac80211.
 *
 * Sets up the supported bands (skipping the first 4 CCK entries of
 * @rates for 5/6 GHz), optionally registers the LED, applies DT
 * frequency limits, and registers the hw with mac80211 when this phy
 * owns it.  Returns 0 on success or a negative error code.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the 4 CCK rates; 5 GHz is OFDM only */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* apply DT frequency limits, then drop fully-disabled bands */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* only register hw for the phy that owns it (not radio phys) */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
595 
/* Tear down a phy registered via mt76_register_phy().  Safe to call on
 * a phy that never completed registration (returns early).
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* flush pending tx status entries before unregistering from mac80211 */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
610 
/* Create a page pool for queue @q.
 *
 * Pools are only created for RX queues and the WED TX-free queue; for
 * any other queue this is a successful no-op.  On MMIO devices the
 * pool also handles DMA mapping/sync and is tied to the queue's NAPI
 * context.  Returns 0 on success or the page_pool_create() error.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* index into dev->q_rx, or -1 for non-RX queues */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	/* data queues get a large pool; control/other queues a small one */
	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* don't leave an ERR_PTR behind for later users */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
659 
/* Allocate and initialize a mt76_dev together with its primary phy and
 * ieee80211_hw.
 *
 * @size is the total hw->priv size requested by the driver (must cover
 * at least struct mt76_dev).  Initializes all locks, queues, token
 * IDRs, caches and the ordered workqueue.  Returns NULL on failure
 * (the hw is freed internally in that case).
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* may be overridden later for buses with a separate DMA device */
	dev->dma_dev = pdev;

	/* primary phy is embedded in the device and owns band 0 */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
737 
/* Register the primary phy of @dev with mac80211.
 *
 * Mirrors mt76_register_phy() for the embedded phy: band setup (5/6
 * GHz skip the 4 leading CCK entries of @rates), DT frequency limits,
 * optional LED, hw registration and finally the TX worker thread.
 * Returns 0 on success or a negative error code.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the 4 CCK rates; 5 GHz is OFDM only */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* apply DT frequency limits, then drop fully-disabled bands */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
791 
/* Tear down the primary phy registration of @dev.  Safe to call when
 * registration never completed (returns early).
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* flush pending tx status and global wcid state before unregister */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
806 
807 void mt76_free_device(struct mt76_dev *dev)
808 {
809 	mt76_worker_teardown(&dev->tx_worker);
810 	if (dev->wq) {
811 		destroy_workqueue(dev->wq);
812 		dev->wq = NULL;
813 	}
814 	ieee80211_free_hw(dev->hw);
815 }
816 EXPORT_SYMBOL_GPL(mt76_free_device);
817 
/* Hand off the currently accumulated A-MSDU on queue @q to the RX skb
 * queue, after validating its first subframe.  Frees the whole burst
 * instead when the validation fails.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* decrypted frames with the IV still present carry
			 * 8 extra bytes before the payload
			 */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP header where the DA should be -> spoofed A-MSDU */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
853 
/* Collect consecutive A-MSDU subframes into one skb (subframes chained
 * on the head skb's frag_list) and release the burst once it is
 * complete.  Non-A-MSDU frames pass through as single-frame bursts.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* a new burst (different seqno, new first subframe, or plain
	 * MSDU) terminates whatever is currently being accumulated
	 */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new burst; tail points at the frag_list slot */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* append subframe to the chain */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
876 
/* Entry point for frames received from the hardware.  Drops frames for
 * phys that are not running, updates testmode RX counters, and feeds
 * the frame into the per-queue A-MSDU burst collector.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
898 
899 bool mt76_has_tx_pending(struct mt76_phy *phy)
900 {
901 	struct mt76_queue *q;
902 	int i;
903 
904 	for (i = 0; i < __MT_TXQ_MAX; i++) {
905 		q = phy->q_tx[i];
906 		if (q && q->queued)
907 			return true;
908 	}
909 
910 	return false;
911 }
912 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
913 
914 static struct mt76_channel_state *
915 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
916 {
917 	struct mt76_sband *msband;
918 	int idx;
919 
920 	if (c->band == NL80211_BAND_2GHZ)
921 		msband = &phy->sband_2g;
922 	else if (c->band == NL80211_BAND_6GHZ)
923 		msband = &phy->sband_6g;
924 	else
925 		msband = &phy->sband_5g;
926 
927 	idx = c - &msband->sband.channels[0];
928 	return &msband->chan[idx];
929 }
930 
931 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
932 {
933 	struct mt76_channel_state *state = phy->chan_state;
934 
935 	state->cc_active += ktime_to_us(ktime_sub(time,
936 						  phy->survey_time));
937 	phy->survey_time = time;
938 }
939 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
940 
/* Pull fresh channel-usage counters from the driver and fold them into
 * the current channel state.  For drivers that account BSS RX airtime
 * in software, the accumulated value is transferred under cc_lock.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock protects cur_cc_bss_rx against the RX path */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
962 
/* Switch @phy to @chandef.  Caller must hold dev->mutex (see
 * mt76_set_channel() for the locked wrapper).
 *
 * Sets MT76_RESET to stop the TX path, drains pending TX (bounded by a
 * 200 ms timeout), snapshots the survey counters, updates the chandef
 * and per-channel state, then calls into the driver to program the
 * hardware before re-enabling TX.  Returns the driver's set_channel()
 * result.
 */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	/* block the TX path while the channel is being switched */
	set_bit(MT76_RESET, &phy->state);

	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* a different frequency or width invalidates the DFS CAC state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chandef = *chandef;

	/* moving away from the operating channel: restart its stats */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
998 
999 int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
1000 		     bool offchannel)
1001 {
1002 	struct mt76_dev *dev = phy->dev;
1003 	int ret;
1004 
1005 	cancel_delayed_work_sync(&phy->mac_work);
1006 
1007 	mutex_lock(&dev->mutex);
1008 	ret = __mt76_set_channel(phy, chandef, offchannel);
1009 	mutex_unlock(&dev->mutex);
1010 
1011 	return ret;
1012 }
1013 
1014 int mt76_update_channel(struct mt76_phy *phy)
1015 {
1016 	struct ieee80211_hw *hw = phy->hw;
1017 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1018 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1019 
1020 	phy->radar_enabled = hw->conf.radar_enabled;
1021 
1022 	return mt76_set_channel(phy, chandef, offchannel);
1023 }
1024 EXPORT_SYMBOL_GPL(mt76_update_channel);
1025 
/* mac80211 .get_survey callback.
 *
 * @idx walks all channels in band order 2g -> 5g -> 6g.  The counters
 * are refreshed from the driver on the first index of each pass.
 * Returns 0 with @survey filled in, or -ENOENT past the last channel.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* refresh hardware counters once per enumeration pass */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* map the flat index onto the 2g/5g/6g channel arrays */
	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* internal counters are in us; survey expects ms */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated from other contexts under cc_lock */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1090 
/* Initialize software PN replay checking for @wcid from @key.
 *
 * Only CCMP keys are checked in software; for any other cipher (or no
 * key) rx_check_pn is disabled.  The current RX packet numbers for all
 * data TIDs and for robust management frames are snapshotted from
 * mac80211.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	/* i == IEEE80211_NUM_TIDS here: the mgmt PN goes into the extra
	 * trailing slot of rx_key_pn (assumes the array is sized
	 * IEEE80211_NUM_TIDS + 1 -- confirm in mt76.h)
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1119 
/* Combine per-chain RSSI readings into a single signal estimate (dBm).
 *
 * Starts from the strongest valid chain and adds a correction for each
 * additional chain depending on how close it is: equal chains add
 * +3 dB (power doubling), chains within 2 dB add +2, within 6 dB +1.
 * Chains not in @chain_mask or with invalid (positive) readings are
 * skipped.  Returns -128 when no chain had a usable signal.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int signal = -128;
	u8 chains;

	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
		int cur, diff;

		cur = *chain_signal;
		if (!(chains & BIT(0)) ||
		    cur > 0)
			continue;

		/* keep the running maximum in 'signal' */
		if (cur > signal)
			swap(cur, signal);

		diff = signal - cur;
		if (diff == 0)
			signal += 3;
		else if (diff <= 2)
			signal += 2;
		else if (diff <= 6)
			signal += 1;
	}

	return signal;
}
EXPORT_SYMBOL(mt76_rx_signal);
1148 
1149 static void
1150 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1151 		struct ieee80211_hw **hw,
1152 		struct ieee80211_sta **sta)
1153 {
1154 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1155 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1156 	struct mt76_rx_status mstat;
1157 
1158 	mstat = *((struct mt76_rx_status *)skb->cb);
1159 	memset(status, 0, sizeof(*status));
1160 
1161 	status->flag = mstat.flag;
1162 	status->freq = mstat.freq;
1163 	status->enc_flags = mstat.enc_flags;
1164 	status->encoding = mstat.encoding;
1165 	status->bw = mstat.bw;
1166 	if (status->encoding == RX_ENC_EHT) {
1167 		status->eht.ru = mstat.eht.ru;
1168 		status->eht.gi = mstat.eht.gi;
1169 	} else {
1170 		status->he_ru = mstat.he_ru;
1171 		status->he_gi = mstat.he_gi;
1172 		status->he_dcm = mstat.he_dcm;
1173 	}
1174 	status->rate_idx = mstat.rate_idx;
1175 	status->nss = mstat.nss;
1176 	status->band = mstat.band;
1177 	status->signal = mstat.signal;
1178 	status->chains = mstat.chains;
1179 	status->ampdu_reference = mstat.ampdu_ref;
1180 	status->device_timestamp = mstat.timestamp;
1181 	status->mactime = mstat.timestamp;
1182 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1183 	if (status->signal <= -128)
1184 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1185 
1186 	if (ieee80211_is_beacon(hdr->frame_control) ||
1187 	    ieee80211_is_probe_resp(hdr->frame_control))
1188 		status->boottime_ns = ktime_get_boottime_ns();
1189 
1190 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1191 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1192 		     sizeof(mstat.chain_signal));
1193 	memcpy(status->chain_signal, mstat.chain_signal,
1194 	       sizeof(mstat.chain_signal));
1195 
1196 	if (mstat.wcid) {
1197 		status->link_valid = mstat.wcid->link_valid;
1198 		status->link_id = mstat.wcid->link_id;
1199 	}
1200 
1201 	*sta = wcid_to_sta(mstat.wcid);
1202 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1203 }
1204 
/* Software CCMP replay detection: compare the received packet number (PN)
 * in status->iv against the last PN accepted for this station/TID and
 * divert replayed frames to monitor only.  Runs solely for stations with
 * rx_check_pn enabled (see mt76_wcid_key_setup()).
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* default replay counter slot: the frame's TID */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* memcmp() acts as a numeric comparison here, which assumes the PN
	 * bytes in status->iv are stored most-significant byte first --
	 * NOTE(review): confirm against the per-driver RX descriptor parsing
	 */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		/* PN did not increase: treat as replay, keep the frame off
		 * the data path
		 */
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	/* PN advanced: record it as the new replay baseline */
	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1262 
1263 static void
1264 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1265 		    int len)
1266 {
1267 	struct mt76_wcid *wcid = status->wcid;
1268 	struct ieee80211_rx_status info = {
1269 		.enc_flags = status->enc_flags,
1270 		.rate_idx = status->rate_idx,
1271 		.encoding = status->encoding,
1272 		.band = status->band,
1273 		.nss = status->nss,
1274 		.bw = status->bw,
1275 	};
1276 	struct ieee80211_sta *sta;
1277 	u32 airtime;
1278 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1279 
1280 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1281 	spin_lock(&dev->cc_lock);
1282 	dev->cur_cc_bss_rx += airtime;
1283 	spin_unlock(&dev->cc_lock);
1284 
1285 	if (!wcid || !wcid->sta)
1286 		return;
1287 
1288 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1289 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1290 }
1291 
1292 static void
1293 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1294 {
1295 	struct mt76_wcid *wcid;
1296 	int wcid_idx;
1297 
1298 	if (!dev->rx_ampdu_len)
1299 		return;
1300 
1301 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1302 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1303 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1304 	else
1305 		wcid = NULL;
1306 	dev->rx_ampdu_status.wcid = wcid;
1307 
1308 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1309 
1310 	dev->rx_ampdu_len = 0;
1311 	dev->rx_ampdu_ref = 0;
1312 }
1313 
/* Per-frame RX airtime bookkeeping for devices using software accounting
 * (MT_DRV_SW_RX_AIRTIME).  Subframes of an A-MPDU are accumulated and
 * reported once per aggregate; standalone frames are reported directly.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* frames from unknown stations only count when they are
		 * addressed to this device
		 */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a non-aggregated frame or a new aggregate reference closes the
	 * previous A-MPDU accounting window
	 */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* first subframe: snapshot rate info and owner */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1353 
/* Per-frame station bookkeeping on the RX path: PS-Poll wcid lookup,
 * airtime accounting, RSSI averaging, and power-save state tracking for
 * drivers that request it (MT_WCID_FLAG_CHECK_PS).
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll frames can arrive without a resolved wcid; look the
	 * station up by its transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* track average RSSI; the ewma stores the negated (positive) value */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only self-contained mgmt/data frames carry a usable PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no PS state change */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* NOTE(review): the flag is set before the driver callback when
	 * entering PS and cleared after it when leaving -- the asymmetric
	 * ordering looks intentional; do not reorder
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1421 
/* Deliver a batch of fully processed RX frames to mac80211.  A-MSDU
 * subframes arrive chained on the head skb's frag_list and are delivered
 * individually.  With a napi context the frames go through GRO, otherwise
 * straight into the network stack.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* the replay check reads the driver cb data and must run
		 * before mt76_rx_convert() overwrites it with mac80211's
		 * rx status
		 */
		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1461 
1462 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1463 			   struct napi_struct *napi)
1464 {
1465 	struct sk_buff_head frames;
1466 	struct sk_buff *skb;
1467 
1468 	__skb_queue_head_init(&frames);
1469 
1470 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1471 		mt76_check_sta(dev, skb);
1472 		if (mtk_wed_device_active(&dev->mmio.wed))
1473 			__skb_queue_tail(&frames, skb);
1474 		else
1475 			mt76_rx_aggr_reorder(skb, &frames);
1476 	}
1477 
1478 	mt76_rx_complete(dev, &frames, napi);
1479 }
1480 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1481 
1482 static int
1483 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1484 	     struct ieee80211_sta *sta)
1485 {
1486 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1487 	struct mt76_dev *dev = phy->dev;
1488 	int ret;
1489 	int i;
1490 
1491 	mutex_lock(&dev->mutex);
1492 
1493 	ret = dev->drv->sta_add(dev, vif, sta);
1494 	if (ret)
1495 		goto out;
1496 
1497 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1498 		struct mt76_txq *mtxq;
1499 
1500 		if (!sta->txq[i])
1501 			continue;
1502 
1503 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1504 		mtxq->wcid = wcid->idx;
1505 	}
1506 
1507 	ewma_signal_init(&wcid->rssi);
1508 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1509 	phy->num_sta++;
1510 
1511 	mt76_wcid_init(wcid, phy->band_idx);
1512 out:
1513 	mutex_unlock(&dev->mutex);
1514 
1515 	return ret;
1516 }
1517 
1518 void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
1519 		       struct ieee80211_sta *sta)
1520 {
1521 	struct mt76_dev *dev = phy->dev;
1522 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1523 	int i, idx = wcid->idx;
1524 
1525 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1526 		mt76_rx_aggr_stop(dev, wcid, i);
1527 
1528 	if (dev->drv->sta_remove)
1529 		dev->drv->sta_remove(dev, vif, sta);
1530 
1531 	mt76_wcid_cleanup(dev, wcid);
1532 
1533 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1534 	phy->num_sta--;
1535 }
1536 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1537 
/* Locked wrapper around __mt76_sta_remove(); serializes station removal
 * with other state changes under dev->mutex.
 */
static void
mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	__mt76_sta_remove(phy, vif, sta);
	mutex_unlock(&dev->mutex);
}
1548 
/* mac80211 sta_state callback: allocates/tears down station state on the
 * NOTEXIST<->NONE transitions and forwards assoc/authorize/disassoc
 * transitions to the driver through the optional sta_event hook.
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	enum mt76_sta_event ev;

	/* first transition: allocate driver state for the station */
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	/* last transition: tear the station down */
	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(phy, vif, sta);

	if (!dev->drv->sta_event)
		return 0;

	/* map the remaining transitions of interest to driver events */
	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		ev = MT76_STA_EVENT_ASSOC;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTHORIZED)
		ev = MT76_STA_EVENT_AUTHORIZE;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTH)
		ev = MT76_STA_EVENT_DISASSOC;
	else
		return 0;

	return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
1584 
/* mac80211 pre-RCU-remove callback: unpublish the station's wcid pointer
 * so no new references can be taken before the station is freed.
 * status_lock is held across the pointer clear -- presumably to
 * synchronize with in-flight tx-status lookups; confirm against the tx
 * status path.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1599 
1600 void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
1601 {
1602 	wcid->hw_key_idx = -1;
1603 	wcid->phy_idx = band_idx;
1604 
1605 	INIT_LIST_HEAD(&wcid->tx_list);
1606 	skb_queue_head_init(&wcid->tx_pending);
1607 	skb_queue_head_init(&wcid->tx_offchannel);
1608 
1609 	INIT_LIST_HEAD(&wcid->list);
1610 	idr_init(&wcid->pktid);
1611 
1612 	INIT_LIST_HEAD(&wcid->poll_list);
1613 }
1614 EXPORT_SYMBOL_GPL(mt76_wcid_init);
1615 
1616 void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
1617 {
1618 	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
1619 	struct ieee80211_hw *hw;
1620 	struct sk_buff_head list;
1621 	struct sk_buff *skb;
1622 
1623 	mt76_tx_status_lock(dev, &list);
1624 	mt76_tx_status_skb_get(dev, wcid, -1, &list);
1625 	mt76_tx_status_unlock(dev, &list);
1626 
1627 	idr_destroy(&wcid->pktid);
1628 
1629 	spin_lock_bh(&phy->tx_lock);
1630 
1631 	if (!list_empty(&wcid->tx_list))
1632 		list_del_init(&wcid->tx_list);
1633 
1634 	spin_lock(&wcid->tx_pending.lock);
1635 	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
1636 	spin_unlock(&wcid->tx_pending.lock);
1637 
1638 	spin_unlock_bh(&phy->tx_lock);
1639 
1640 	while ((skb = __skb_dequeue(&list)) != NULL) {
1641 		hw = mt76_tx_status_get_hw(dev, skb);
1642 		ieee80211_free_txskb(hw, skb);
1643 	}
1644 }
1645 EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1646 
/* Queue a wcid for the driver's station poll worker unless the MCU is
 * currently resetting.  poll_list membership is protected by
 * dev->sta_poll_lock; an entry already on the list is left untouched.
 */
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1658 
1659 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1660 		     unsigned int link_id, int *dbm)
1661 {
1662 	struct mt76_phy *phy = hw->priv;
1663 	int n_chains = hweight16(phy->chainmask);
1664 	int delta = mt76_tx_power_nss_delta(n_chains);
1665 
1666 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1667 
1668 	return 0;
1669 }
1670 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1671 
/* Apply cfg80211 SAR power limits to the phy by copying the per
 * frequency-range limits into phy->frp.  Returns -EINVAL for specs that
 * are not of type NL80211_SAR_TYPE_POWER or that contain no sub specs.
 * No bounds check on freq_range_index here -- presumably cfg80211
 * validates it against sar_capa before calling; confirm.
 */
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limitation in 0.25 dBm units;
		 * halve it to the 0.5 dBm units used internally
		 */
		s32 power = sar->sub_specs[i].power >> 1;

		/* NOTE(review): values below -127 are clamped to +127 as
		 * well -- mirrors the range check in mt76_get_sar_power();
		 * confirm this is the intended saturation behavior
		 */
		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1697 
1698 int mt76_get_sar_power(struct mt76_phy *phy,
1699 		       struct ieee80211_channel *chan,
1700 		       int power)
1701 {
1702 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1703 	int freq, i;
1704 
1705 	if (!capa || !phy->frp)
1706 		return power;
1707 
1708 	if (power > 127 || power < -127)
1709 		power = 127;
1710 
1711 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1712 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1713 		if (phy->frp[i].range &&
1714 		    freq >= phy->frp[i].range->start_freq &&
1715 		    freq < phy->frp[i].range->end_freq) {
1716 			power = min_t(int, phy->frp[i].power, power);
1717 			break;
1718 		}
1719 	}
1720 
1721 	return power;
1722 }
1723 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1724 
1725 static void
1726 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1727 {
1728 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1729 		ieee80211_csa_finish(vif, 0);
1730 }
1731 
/* Complete a pending channel switch: once dev->csa_complete has been
 * latched (see mt76_csa_check()), finalize CSA on every active interface
 * and clear the flag.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1744 
1745 static void
1746 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1747 {
1748 	struct mt76_dev *dev = priv;
1749 
1750 	if (!vif->bss_conf.csa_active)
1751 		return;
1752 
1753 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1754 }
1755 
/* Poll the beacon countdown on all active interfaces; completion is
 * latched into dev->csa_complete and consumed by mt76_csa_finish().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1763 
/* mac80211 set_tim callback stub: intentionally empty, always succeeds.
 * NOTE(review): presumably TIM content is picked up when the beacon is
 * next fetched, so no immediate action is required here -- confirm
 * against the drivers' beacon update paths.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1770 
/* Re-insert the 8-byte CCMP header that the hardware stripped on RX,
 * rebuilding it from the PN saved in status->iv so mac80211 can run its
 * own PN/replay handling.  Clears RX_FLAG_IV_STRIPPED accordingly.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* open an 8-byte gap between the 802.11 header and the payload */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header: PN0 PN1 rsvd keyid/ExtIV PN2 PN3 PN4 PN5; the byte
	 * reversal implies status->iv stores the PN most-significant byte
	 * first
	 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* ExtIV bit + key index */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1793 
1794 int mt76_get_rate(struct mt76_dev *dev,
1795 		  struct ieee80211_supported_band *sband,
1796 		  int idx, bool cck)
1797 {
1798 	bool is_2g = sband->band == NL80211_BAND_2GHZ;
1799 	int i, offset = 0, len = sband->n_bitrates;
1800 
1801 	if (cck) {
1802 		if (!is_2g)
1803 			return 0;
1804 
1805 		idx &= ~BIT(2); /* short preamble */
1806 	} else if (is_2g) {
1807 		offset = 4;
1808 	}
1809 
1810 	for (i = offset; i < len; i++) {
1811 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1812 			return i;
1813 	}
1814 
1815 	return 0;
1816 }
1817 EXPORT_SYMBOL_GPL(mt76_get_rate);
1818 
/* mac80211 sw_scan_start callback: mark the phy as scanning */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1827 
/* mac80211 sw_scan_complete callback: clear the phy's scanning state */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1835 
/* mac80211 get_antenna callback: report the configured antenna mask.
 * TX and RX use the same mask; dev->mutex guards against concurrent
 * reconfiguration.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1849 
1850 struct mt76_queue *
1851 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1852 		int ring_base, void *wed, u32 flags)
1853 {
1854 	struct mt76_queue *hwq;
1855 	int err;
1856 
1857 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1858 	if (!hwq)
1859 		return ERR_PTR(-ENOMEM);
1860 
1861 	hwq->flags = flags;
1862 	hwq->wed = wed;
1863 
1864 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1865 	if (err < 0)
1866 		return ERR_PTR(err);
1867 
1868 	return hwq;
1869 }
1870 EXPORT_SYMBOL_GPL(mt76_init_queue);
1871 
/* Accumulate one station's tx rate statistics into the ethtool data
 * array.  wi->initial_stat_idx marks where the per-station counters
 * start; the number of stats written is recorded in worker_stat_count so
 * the caller can validate the layout.  EHT-capable devices report three
 * extra tx modes, one extra bandwidth bucket and MCS indices 12/13.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* non-EHT devices skip the widest bandwidth bucket */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* MCS 12/13 only exist for EHT */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1907 
/* Aggregate page pool statistics over all RX queues and append them to
 * the ethtool data array, advancing *index by the number of stats added.
 * Compiles to a no-op (index unchanged) without CONFIG_PAGE_POOL_STATS.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1922 
/* Derive the current DFS state for a phy:
 * - DISABLED while no DFS region is set or a scan is in progress,
 * - ACTIVE in monitor mode on a radar channel even without radar
 *   detection enabled,
 * - CAC while beaconing is not yet permitted on the chandef,
 * - ACTIVE once the CAC requirement is satisfied.
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!phy->radar_enabled) {
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	/* cfg80211 refuses beaconing while the channel still requires CAC */
	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1946 
1947 void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
1948 {
1949 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
1950 	struct mt76_vif_data *mvif = mlink->mvif;
1951 
1952 	rcu_assign_pointer(mvif->link[0], NULL);
1953 	mt76_abort_scan(dev);
1954 }
1955 EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
1956