xref: /linux/drivers/net/wireless/mediatek/mt76/mac80211.c (revision cbf5e61da66028ea30b52515dc1f1af969589bf7)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
/* Initializers for struct ieee80211_channel entries in the per-band
 * channel tables below.  _idx is the IEEE channel number (hw_value),
 * _freq the center frequency in MHz; max_power defaults to 30 dBm.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29 
/* 2.4 GHz channels 1-14 (2412-2484 MHz) */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46 
/* 5 GHz channels, grouped by sub-band (UNII-1, UNII-2, UNII-2e, UNII-3/4) */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80 
/* 6 GHz channels 1-233 (5955-7115 MHz), 20 MHz spacing, by UNII band */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146 
/* LED blink table for the mac80211 throughput trigger: higher throughput
 * selects a shorter blink_time (i.e. faster blinking).  Thresholds are
 * expressed as N * 1024 — presumably kbit/s; see ieee80211_tpt_blink.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159 
/* Shared legacy bitrate table: 4 CCK rates followed by 8 OFDM rates.
 * CCK_RATE/OFDM_RATE come from mt76.h; the second argument appears to
 * be the rate in 100 kbit/s units (e.g. 10 == 1 Mbit/s) — the usual
 * mac80211 bitrate convention; confirm against the macro definitions.
 * Band init passes "rates + 4" for 5/6 GHz to skip the CCK entries.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175 
/* Frequency ranges (MHz) for which userspace may set SAR power limits;
 * covers 2.4 GHz, the 5 GHz sub-bands and the 6 GHz sub-bands.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};

/* SAR capability advertised via wiphy->sar_capa in mt76_phy_init() */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195 
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 	struct device_node *np = dev->dev->of_node;
201 
202 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 		return 0;
204 
205 	np = of_get_child_by_name(np, "led");
206 	if (np) {
207 		if (!of_device_is_available(np)) {
208 			of_node_put(np);
209 			dev_info(dev->dev,
210 				"led registration was explicitly disabled by dts\n");
211 			return 0;
212 		}
213 
214 		if (phy == &dev->phy) {
215 			int led_pin;
216 
217 			if (!of_property_read_u32(np, "led-sources", &led_pin))
218 				phy->leds.pin = led_pin;
219 
220 			phy->leds.al =
221 				of_property_read_bool(np, "led-active-low");
222 		}
223 
224 		of_node_put(np);
225 	}
226 
227 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 		 wiphy_name(hw->wiphy));
229 
230 	phy->leds.cdev.name = phy->leds.name;
231 	phy->leds.cdev.default_trigger =
232 		ieee80211_create_tpt_led_trigger(hw,
233 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 					mt76_tpt_blink,
235 					ARRAY_SIZE(mt76_tpt_blink));
236 
237 	dev_info(dev->dev,
238 		"registering led '%s'\n", phy->leds.name);
239 
240 	return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242 
243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 		return;
247 
248 	led_classdev_unregister(&phy->leds.cdev);
249 }
250 
/* Update a band's HT (and optionally VHT) spatial-stream capabilities
 * from the phy's antenna mask: TX STBC is advertised only with more
 * than one stream, the HT RX MCS mask enables one 8-MCS group per
 * stream, and the VHT MCS maps mark MCS 0-9 per available stream.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	/* One spatial stream per set bit in the antenna mask */
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* 0xff enables MCS (i*8)..(i*8+7) for stream i+1 */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* 2 bits per NSS in the VHT MCS map: MCS 0-9 or "not supported" */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292 
293 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
294 {
295 	if (phy->cap.has_2ghz)
296 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
297 	if (phy->cap.has_5ghz)
298 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
299 	if (phy->cap.has_6ghz)
300 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
301 }
302 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303 
/* Populate one supported band: duplicate the channel template, allocate
 * per-channel state, wire up the bitrate table, then fill in default
 * HT and (optionally) VHT capabilities.
 *
 * Memory is devm-managed, so there is no explicit cleanup on failure.
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* Copy the const template so per-device flags can be modified */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Per-stream MCS/STBC bits depend on the antenna mask */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360 
361 static int
362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369 			       n_rates, true, false);
370 }
371 
372 static int
373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates, bool vht)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380 			       n_rates, true, vht);
381 }
382 
383 static int
384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 		   int n_rates)
386 {
387 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388 
389 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391 			       n_rates, false, false);
392 }
393 
394 static void
395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 		 enum nl80211_band band)
397 {
398 	struct ieee80211_supported_band *sband = &msband->sband;
399 	bool found = false;
400 	int i;
401 
402 	if (!sband)
403 		return;
404 
405 	for (i = 0; i < sband->n_channels; i++) {
406 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 			continue;
408 
409 		found = true;
410 		break;
411 	}
412 
413 	if (found) {
414 		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
415 					NL80211_CHAN_HT20);
416 		phy->chan_state = &msband->chan[0];
417 		return;
418 	}
419 
420 	sband->n_channels = 0;
421 	if (phy->hw->wiphy->bands[band] == sband)
422 		phy->hw->wiphy->bands[band] = NULL;
423 }
424 
/* Common phy/hw initialization shared by mt76_register_device() and
 * mt76_register_phy(): per-phy TX list/lock always; wiphy flags, SAR
 * capability, hw capability bits and the frp allocation only when this
 * phy owns the ieee80211_hw (secondary radio phys share the primary hw
 * and skip that part).  Returns 0 or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);

	/* Radio phys allocated via mt76_alloc_radio_phy() share the
	 * primary hw; only the hw-owning phy configures it below.
	 */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* One power-limit slot per advertised SAR frequency range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* Software A-MSDU aggregation is pointless when the hardware
	 * offloads it or only a single fragment is allowed.
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
487 
488 struct mt76_phy *
489 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
490 		     u8 band_idx)
491 {
492 	struct ieee80211_hw *hw = dev->phy.hw;
493 	unsigned int phy_size;
494 	struct mt76_phy *phy;
495 
496 	phy_size = ALIGN(sizeof(*phy), 8);
497 	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
498 	if (!phy)
499 		return NULL;
500 
501 	phy->dev = dev;
502 	phy->hw = hw;
503 	phy->priv = (void *)phy + phy_size;
504 	phy->band_idx = band_idx;
505 
506 	return phy;
507 }
508 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
509 
510 struct mt76_phy *
511 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
512 	       const struct ieee80211_ops *ops, u8 band_idx)
513 {
514 	struct ieee80211_hw *hw;
515 	unsigned int phy_size;
516 	struct mt76_phy *phy;
517 
518 	phy_size = ALIGN(sizeof(*phy), 8);
519 	hw = ieee80211_alloc_hw(size + phy_size, ops);
520 	if (!hw)
521 		return NULL;
522 
523 	phy = hw->priv;
524 	phy->dev = dev;
525 	phy->hw = hw;
526 	phy->priv = hw->priv + phy_size;
527 	phy->band_idx = band_idx;
528 
529 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
530 	hw->wiphy->interface_modes =
531 		BIT(NL80211_IFTYPE_STATION) |
532 		BIT(NL80211_IFTYPE_AP) |
533 #ifdef CONFIG_MAC80211_MESH
534 		BIT(NL80211_IFTYPE_MESH_POINT) |
535 #endif
536 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
537 		BIT(NL80211_IFTYPE_P2P_GO) |
538 		BIT(NL80211_IFTYPE_ADHOC);
539 
540 	return phy;
541 }
542 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
543 
/* Register an extra phy: initialize it, build its supported bands,
 * optionally register an LED, apply DT frequency limits, and — only if
 * this phy owns its own ieee80211_hw — register that hw with mac80211.
 * On success the phy is published in dev->phys[] and marked REGISTERED.
 *
 * Note: 5/6 GHz bands skip the first 4 entries of 'rates' (the CCK
 * rates in mt76_rates).  Returns 0 or a negative errno.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip CCK rates on 5 GHz */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		/* skip CCK rates on 6 GHz */
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* Apply DT limits, then drop bands with no remaining channels */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* Radio phys share an already-registered hw; only register when
	 * this phy is the hw owner (same test as in mt76_phy_init()).
	 */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
594 
595 void mt76_unregister_phy(struct mt76_phy *phy)
596 {
597 	struct mt76_dev *dev = phy->dev;
598 
599 	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
600 		return;
601 
602 	if (IS_ENABLED(CONFIG_MT76_LEDS))
603 		mt76_led_cleanup(phy);
604 	mt76_tx_status_check(dev, true);
605 	ieee80211_unregister_hw(phy->hw);
606 	dev->phys[phy->band_idx] = NULL;
607 }
608 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
609 
/* Create a page pool for a queue.  Only RX queues and the WED TX-free
 * queue get one; other queues return 0 without allocating.  Main/band
 * RX queues use a larger pool; MMIO devices additionally let the pool
 * handle DMA mapping and attach the matching NAPI context.
 * Returns 0 or the page_pool_create() error.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* RX queue index (offset into dev->q_rx), or -1 for non-RX */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* Clear the error pointer so callers never see it */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
658 
/* Allocate the mt76_dev together with its primary ieee80211_hw and
 * driver private data ('size' covers both, laid out by the caller's
 * ops), then initialize all device-wide locks, lists, work items and
 * the ordered workqueue.  Returns NULL on any allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* May be overridden later by bus code if DMA uses another device */
	dev->dma_dev = pdev;

	/* The primary phy is embedded in the device and owns band 0 */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
736 
/* Register the primary phy with mac80211: init phy/hw, build supported
 * bands (5/6 GHz skip the 4 leading CCK rates), apply DT frequency
 * limits, optionally register an LED, register the hw, then start the
 * TX worker at FIFO-low priority.  Returns 0 or a negative errno.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip CCK rates on 5 GHz */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		/* skip CCK rates on 6 GHz */
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply DT limits, then drop bands with no remaining channels */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
790 
791 void mt76_unregister_device(struct mt76_dev *dev)
792 {
793 	struct ieee80211_hw *hw = dev->hw;
794 
795 	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
796 		return;
797 
798 	if (IS_ENABLED(CONFIG_MT76_LEDS))
799 		mt76_led_cleanup(&dev->phy);
800 	mt76_tx_status_check(dev, true);
801 	mt76_wcid_cleanup(dev, &dev->global_wcid);
802 	ieee80211_unregister_hw(hw);
803 }
804 EXPORT_SYMBOL_GPL(mt76_unregister_device);
805 
806 void mt76_free_device(struct mt76_dev *dev)
807 {
808 	mt76_worker_teardown(&dev->tx_worker);
809 	if (dev->wq) {
810 		destroy_workqueue(dev->wq);
811 		dev->wq = NULL;
812 	}
813 	ieee80211_free_hw(dev->hw);
814 }
815 EXPORT_SYMBOL_GPL(mt76_free_device);
816 
/* Finish the A-MSDU currently being collected on queue q: detach it
 * from the phy's per-queue accumulator, sanity-check its first
 * subframe, and either drop it or hand it to dev->rx_skb[q].
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* decrypted with IV still present: skip 8 bytes of IV */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be => spoofed A-MSDU, drop */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
852 
/* Accumulate RX frames into per-queue A-MSDU bursts.  A pending burst
 * is flushed when a frame arrives that cannot belong to it (not an
 * A-MSDU subframe, a new first subframe, or a different sequence
 * number); subframes are chained via the head skb's frag_list.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* This frame starts something new: flush the pending burst */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new burst; tail points into the head's frag_list */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* Append to the chain and advance the tail pointer */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* Complete burst (or standalone frame): release immediately */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
875 
/* Driver entry point for received frames: drop if the owning phy is not
 * running, update testmode RX counters when enabled, then feed the
 * frame into the per-queue A-MSDU burst accumulator.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
897 
898 bool mt76_has_tx_pending(struct mt76_phy *phy)
899 {
900 	struct mt76_queue *q;
901 	int i;
902 
903 	for (i = 0; i < __MT_TXQ_MAX; i++) {
904 		q = phy->q_tx[i];
905 		if (q && q->queued)
906 			return true;
907 	}
908 
909 	return false;
910 }
911 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
912 
913 static struct mt76_channel_state *
914 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
915 {
916 	struct mt76_sband *msband;
917 	int idx;
918 
919 	if (c->band == NL80211_BAND_2GHZ)
920 		msband = &phy->sband_2g;
921 	else if (c->band == NL80211_BAND_6GHZ)
922 		msband = &phy->sband_6g;
923 	else
924 		msband = &phy->sband_5g;
925 
926 	idx = c - &msband->sband.channels[0];
927 	return &msband->chan[idx];
928 }
929 
930 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
931 {
932 	struct mt76_channel_state *state = phy->chan_state;
933 
934 	state->cc_active += ktime_to_us(ktime_sub(time,
935 						  phy->survey_time));
936 	phy->survey_time = time;
937 }
938 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
939 
/* Refresh the current channel's survey statistics: pull fresh counters
 * from the driver, account elapsed active time, and (for software RX
 * airtime accounting) fold the accumulated BSS RX time into the
 * channel state under cc_lock.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock guards cur_cc_bss_rx against concurrent updates */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
961 
/* Switch the phy to a new channel definition.
 *
 * Quiesces TX (worker disabled, waits up to HZ/5 for queues to drain),
 * snapshots the old channel's survey data, updates chandef/chan_state
 * bookkeeping, then calls the driver's set_channel under dev->mutex
 * with MT76_RESET held.  Returns the driver's set_channel result.
 */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	set_bit(MT76_RESET, &phy->state);

	/* Stop TX and give pending frames a bounded time to drain */
	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	/* Close out survey accounting for the channel we are leaving */
	mt76_update_survey(phy);

	/* Changing frequency or width invalidates any DFS CAC result */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	/* Only on-channel switches update the operating chandef */
	if (!offchannel)
		phy->main_chandef = *chandef;

	/* Off-channel visits must not pollute the main channel's stats */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	mutex_unlock(&dev->mutex);

	return ret;
}
1002 
1003 int mt76_update_channel(struct mt76_phy *phy)
1004 {
1005 	struct ieee80211_hw *hw = phy->hw;
1006 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1007 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1008 
1009 	return mt76_set_channel(phy, chandef, offchannel);
1010 }
1011 EXPORT_SYMBOL_GPL(mt76_update_channel);
1012 
/* mac80211 get_survey callback: report channel occupancy statistics for
 * the idx-th channel, counting across bands in 2g/5g/6g order.  Returns
 * -ENOENT past the last channel (mac80211 iterates until then).
 * Counter values are converted from microseconds to milliseconds.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* Refresh counters once per iteration pass (at idx 0) */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* Map the flat index onto 2g, then 5g, then 6g channel ranges */
	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		/* BSS RX time is only tracked with software airtime */
		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated under cc_lock elsewhere */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1077 
/* Prime a wcid's RX PN-replay state from a newly installed key.
 * PN checking is enabled only for CCMP; all other ciphers (or key
 * removal) leave it disabled.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	/* i == IEEE80211_NUM_TIDS after the loop: stores the mgmt-frame PN
	 * in an extra trailing slot — assumes rx_key_pn is sized
	 * IEEE80211_NUM_TIDS + 1; verify against mt76.h.
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1106 
1107 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1108 {
1109 	int signal = -128;
1110 	u8 chains;
1111 
1112 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1113 		int cur, diff;
1114 
1115 		cur = *chain_signal;
1116 		if (!(chains & BIT(0)) ||
1117 		    cur > 0)
1118 			continue;
1119 
1120 		if (cur > signal)
1121 			swap(cur, signal);
1122 
1123 		diff = signal - cur;
1124 		if (diff == 0)
1125 			signal += 3;
1126 		else if (diff <= 2)
1127 			signal += 2;
1128 		else if (diff <= 6)
1129 			signal += 1;
1130 	}
1131 
1132 	return signal;
1133 }
1134 EXPORT_SYMBOL(mt76_rx_signal);
1135 
1136 static void
1137 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1138 		struct ieee80211_hw **hw,
1139 		struct ieee80211_sta **sta)
1140 {
1141 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1142 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1143 	struct mt76_rx_status mstat;
1144 
1145 	mstat = *((struct mt76_rx_status *)skb->cb);
1146 	memset(status, 0, sizeof(*status));
1147 
1148 	status->flag = mstat.flag;
1149 	status->freq = mstat.freq;
1150 	status->enc_flags = mstat.enc_flags;
1151 	status->encoding = mstat.encoding;
1152 	status->bw = mstat.bw;
1153 	if (status->encoding == RX_ENC_EHT) {
1154 		status->eht.ru = mstat.eht.ru;
1155 		status->eht.gi = mstat.eht.gi;
1156 	} else {
1157 		status->he_ru = mstat.he_ru;
1158 		status->he_gi = mstat.he_gi;
1159 		status->he_dcm = mstat.he_dcm;
1160 	}
1161 	status->rate_idx = mstat.rate_idx;
1162 	status->nss = mstat.nss;
1163 	status->band = mstat.band;
1164 	status->signal = mstat.signal;
1165 	status->chains = mstat.chains;
1166 	status->ampdu_reference = mstat.ampdu_ref;
1167 	status->device_timestamp = mstat.timestamp;
1168 	status->mactime = mstat.timestamp;
1169 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1170 	if (status->signal <= -128)
1171 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1172 
1173 	if (ieee80211_is_beacon(hdr->frame_control) ||
1174 	    ieee80211_is_probe_resp(hdr->frame_control))
1175 		status->boottime_ns = ktime_get_boottime_ns();
1176 
1177 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1178 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1179 		     sizeof(mstat.chain_signal));
1180 	memcpy(status->chain_signal, mstat.chain_signal,
1181 	       sizeof(mstat.chain_signal));
1182 
1183 	if (mstat.wcid) {
1184 		status->link_valid = mstat.wcid->link_valid;
1185 		status->link_id = mstat.wcid->link_id;
1186 	}
1187 
1188 	*sta = wcid_to_sta(mstat.wcid);
1189 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1190 }
1191 
/* Software CCMP replay detection: compare the frame's PN (status->iv)
 * against the last PN accepted for this wcid/TID and flag replays with
 * RX_FLAG_ONLY_MONITOR so mac80211 drops them from the data path.
 * Only runs for decrypted frames on wcids with rx_check_pn enabled
 * (see mt76_wcid_key_setup()).
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* per-TID replay counter index; 802.3-decapped frames carry no
	 * 802.11 header, so skip the header-based checks below
	 */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* memcmp() of the PN bytes gives numeric ordering (assumes
	 * most-significant byte first, matching how status->iv is filled in
	 * by the per-chip rx path); PN must strictly increase
	 */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1249 
/* Estimate the RX airtime of a frame of @len bytes from its rate info,
 * account it to the per-phy BSS RX counter, and — when the frame belongs
 * to a known station — report it to mac80211's airtime fairness code.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	/* minimal rx-status copy: only the fields airtime calculation uses */
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	/* tx_time = 0: this is pure RX airtime */
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
1278 
1279 static void
1280 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1281 {
1282 	struct mt76_wcid *wcid;
1283 	int wcid_idx;
1284 
1285 	if (!dev->rx_ampdu_len)
1286 		return;
1287 
1288 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1289 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1290 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1291 	else
1292 		wcid = NULL;
1293 	dev->rx_ampdu_status.wcid = wcid;
1294 
1295 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1296 
1297 	dev->rx_ampdu_len = 0;
1298 	dev->rx_ampdu_ref = 0;
1299 }
1300 
/* Per-frame hook for software RX airtime accounting (drivers with
 * MT_DRV_SW_RX_AIRTIME).  Frames belonging to the same A-MPDU (matched
 * by ampdu_ref) are accumulated and reported in one shot by
 * mt76_airtime_flush_ampdu(); everything else is reported immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* frames from unknown stations are only accounted when they
		 * are addressed to us
		 */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a new (or no) aggregate: flush whatever was being accumulated */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* start tracking this aggregate; remember the wcid by
			 * index (0xff = none) so it can be safely re-resolved
			 * at flush time
			 */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1340 
/* Per-frame station bookkeeping on the RX path: resolve the wcid for
 * PS-Poll frames, feed airtime accounting, update the RSSI average and
 * inactivity counter, and mirror the sender's power-save state into
 * mac80211 (for drivers that set MT_WCID_FLAG_CHECK_PS).
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll frames may arrive without a resolved wcid; look the
	 * station up by transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* signal is <= 0 dBm when valid; store as positive value for ewma */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames carry a meaningful PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no PS state change */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* deliberate ordering: set the flag before notifying the driver on
	 * PS entry, clear it after the driver callback on PS exit
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1408 
/* Hand a batch of fully processed RX frames to mac80211.  Each skb may
 * carry additional A-MSDU subframes in its frag_list; these are unlinked
 * and delivered individually.  When called from NAPI context (@napi set),
 * frames go through GRO; otherwise they are delivered directly.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* PN check must run before mt76_rx_convert() clobbers the
		 * driver rx status in skb->cb
		 */
		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1448 
/* Drain the pending skbs of RX queue @q: run per-frame station checks,
 * pass frames through the software A-MPDU reorder buffer (skipped when a
 * WED device is active — NOTE(review): reordering is presumably handled
 * elsewhere in that case), then deliver the result via mt76_rx_complete().
 */
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		if (mtk_wed_device_active(&dev->mmio.wed))
			__skb_queue_tail(&frames, skb);
		else
			mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1468 
/* Register a new station: invoke the driver's sta_add hook, bind the
 * station's TX queues to its wcid index, initialize per-station state
 * and publish the wcid for the RX path.  Returns 0 or the driver's
 * error code.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* point each of the station's TX queues at its wcid */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	/* track which band the wcid belongs to for dual-phy devices */
	if (phy->band_idx == MT_BAND1)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	/* publishing the pointer makes the wcid visible to the RX path */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1506 
/* Tear down a station's driver state: stop RX aggregation sessions, run
 * the driver's sta_remove hook, flush pending TX status/frames and clear
 * the wcid bookkeeping.  Caller must hold dev->mutex (see
 * mt76_sta_remove()).
 */
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
	phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1527 
/* Locked wrapper around __mt76_sta_remove() for use from mac80211
 * callbacks (see mt76_sta_state()).
 */
static void
mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	__mt76_sta_remove(phy, vif, sta);
	mutex_unlock(&dev->mutex);
}
1538 
1539 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1540 		   struct ieee80211_sta *sta,
1541 		   enum ieee80211_sta_state old_state,
1542 		   enum ieee80211_sta_state new_state)
1543 {
1544 	struct mt76_phy *phy = hw->priv;
1545 	struct mt76_dev *dev = phy->dev;
1546 	enum mt76_sta_event ev;
1547 
1548 	if (old_state == IEEE80211_STA_NOTEXIST &&
1549 	    new_state == IEEE80211_STA_NONE)
1550 		return mt76_sta_add(phy, vif, sta);
1551 
1552 	if (old_state == IEEE80211_STA_NONE &&
1553 	    new_state == IEEE80211_STA_NOTEXIST)
1554 		mt76_sta_remove(phy, vif, sta);
1555 
1556 	if (!dev->drv->sta_event)
1557 		return 0;
1558 
1559 	if (old_state == IEEE80211_STA_AUTH &&
1560 	    new_state == IEEE80211_STA_ASSOC)
1561 		ev = MT76_STA_EVENT_ASSOC;
1562 	else if (old_state == IEEE80211_STA_ASSOC &&
1563 		 new_state == IEEE80211_STA_AUTHORIZED)
1564 		ev = MT76_STA_EVENT_AUTHORIZE;
1565 	else if (old_state == IEEE80211_STA_ASSOC &&
1566 		 new_state == IEEE80211_STA_AUTH)
1567 		ev = MT76_STA_EVENT_DISASSOC;
1568 	else
1569 		return 0;
1570 
1571 	return dev->drv->sta_event(dev, vif, sta, ev);
1572 }
1573 EXPORT_SYMBOL_GPL(mt76_sta_state);
1574 
/* mac80211 sta_pre_rcu_remove callback: unpublish the wcid pointer so no
 * new references are taken before the station is torn down.  Taking
 * status_lock around the assignment serializes against the TX status
 * path looking the wcid up.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1589 
/* Initialize the generic parts of a wcid: no hardware key yet
 * (hw_key_idx = -1), the owning band, and the various list/queue heads
 * used by the TX, status and polling paths.
 */
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
	wcid->hw_key_idx = -1;
	wcid->phy_idx = band_idx;

	INIT_LIST_HEAD(&wcid->tx_list);
	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);

	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);

	INIT_LIST_HEAD(&wcid->poll_list);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1605 
/* Release everything a wcid still holds on the TX side: flush pending
 * TX-status skbs, destroy the packet-id idr, unlink the wcid from the
 * phy's TX scheduling list, and free any queued frames.  Note the lock
 * nesting: phy->tx_lock (bh) outside, tx_pending queue lock inside.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* collect all skbs still waiting for TX status into "list" */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	/* free everything collected above outside the locks */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1636 
/* Queue a wcid for station statistics polling, unless it is already
 * queued.  Skipped entirely while the MCU is being reset.
 */
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1648 
/* mac80211 get_txpower callback: report the current TX power in dBm.
 * txpower_cur is kept in 0.5 dBm units, hence the divide by 2; the
 * per-chain delta accounts for power gained from using multiple chains.
 */
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     unsigned int link_id, int *dbm)
{
	struct mt76_phy *phy = hw->priv;
	int n_chains = hweight16(phy->chainmask);
	int delta = mt76_tx_power_nss_delta(n_chains);

	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);
1661 
1662 int mt76_init_sar_power(struct ieee80211_hw *hw,
1663 			const struct cfg80211_sar_specs *sar)
1664 {
1665 	struct mt76_phy *phy = hw->priv;
1666 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1667 	int i;
1668 
1669 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1670 		return -EINVAL;
1671 
1672 	for (i = 0; i < sar->num_sub_specs; i++) {
1673 		u32 index = sar->sub_specs[i].freq_range_index;
1674 		/* SAR specifies power limitaton in 0.25dbm */
1675 		s32 power = sar->sub_specs[i].power >> 1;
1676 
1677 		if (power > 127 || power < -127)
1678 			power = 127;
1679 
1680 		phy->frp[index].range = &capa->freq_ranges[index];
1681 		phy->frp[index].power = power;
1682 	}
1683 
1684 	return 0;
1685 }
1686 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1687 
1688 int mt76_get_sar_power(struct mt76_phy *phy,
1689 		       struct ieee80211_channel *chan,
1690 		       int power)
1691 {
1692 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1693 	int freq, i;
1694 
1695 	if (!capa || !phy->frp)
1696 		return power;
1697 
1698 	if (power > 127 || power < -127)
1699 		power = 127;
1700 
1701 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1702 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1703 		if (phy->frp[i].range &&
1704 		    freq >= phy->frp[i].range->start_freq &&
1705 		    freq < phy->frp[i].range->end_freq) {
1706 			power = min_t(int, phy->frp[i].power, power);
1707 			break;
1708 		}
1709 	}
1710 
1711 	return power;
1712 }
1713 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1714 
/* Interface iterator callback: complete the channel-switch announcement
 * on each vif whose countdown has expired.
 */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
		ieee80211_csa_finish(vif, 0);
}
1721 
/* Finalize a pending channel switch (set by mt76_csa_check()) on all
 * active interfaces and clear the completion flag.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1734 
/* Interface iterator callback: record in dev->csa_complete whether any
 * vif's channel-switch countdown has finished.
 */
static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->bss_conf.csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
1745 
/* Poll all active interfaces for a completed channel-switch countdown;
 * the result is latched in dev->csa_complete for mt76_csa_finish().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1753 
/* mac80211 set_tim callback: nothing to do — the TIM element is
 * regenerated with every beacon update — but the callback must exist.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1760 
/* Re-insert the 8-byte CCMP header that the hardware stripped on RX,
 * rebuilding it from the PN saved in status->iv, and clear
 * RX_FLAG_IV_STRIPPED so mac80211 parses the header itself.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* make room and shift the 802.11 header forward by 8 bytes */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0 PN1 rsvd keyid/ExtIV PN2 PN3 PN4 PN5;
	 * pn[] holds the PN most-significant byte first, 0x20 is Ext IV
	 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1783 
1784 int mt76_get_rate(struct mt76_dev *dev,
1785 		  struct ieee80211_supported_band *sband,
1786 		  int idx, bool cck)
1787 {
1788 	bool is_2g = sband->band == NL80211_BAND_2GHZ;
1789 	int i, offset = 0, len = sband->n_bitrates;
1790 
1791 	if (cck) {
1792 		if (!is_2g)
1793 			return 0;
1794 
1795 		idx &= ~BIT(2); /* short preamble */
1796 	} else if (is_2g) {
1797 		offset = 4;
1798 	}
1799 
1800 	for (i = offset; i < len; i++) {
1801 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1802 			return i;
1803 	}
1804 
1805 	return 0;
1806 }
1807 EXPORT_SYMBOL_GPL(mt76_get_rate);
1808 
/* mac80211 sw_scan_start callback: flag the phy as scanning so other
 * paths (e.g. DFS state) can adjust their behavior.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1817 
/* mac80211 sw_scan_complete callback: clear the scanning flag set by
 * mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1825 
/* mac80211 get_antenna callback: both TX and RX report the same
 * configured antenna mask.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1839 
/* Allocate and initialize a hardware queue via the bus-specific
 * queue_ops->alloc() hook.  The queue memory is devm-managed.  Returns
 * the queue or an ERR_PTR() on failure.
 */
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	hwq->flags = flags;
	hwq->wed = wed;

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);
1861 
/* Accumulate one station's TX statistics into the ethtool stats array.
 * The order and number of entries written here must stay in sync with
 * the driver's ethtool stats string table; @eht toggles the extra
 * EHT-specific counters.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* without EHT the last bandwidth bucket is not reported */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 14 MCS entries with EHT, 12 otherwise */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1897 
/* Aggregate page-pool statistics across all RX queues into the ethtool
 * stats array and advance *index past the page-pool entries.  Compiles
 * to a no-op without CONFIG_PAGE_POOL_STATS.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1912 
/* Determine the DFS radar-detection state required for the phy's current
 * channel configuration: DISABLED (no detection needed), CAC (channel
 * availability check must run before beaconing) or ACTIVE (in-service
 * monitoring).
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	/* no DFS region configured, or scanning: detection is pointless */
	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!hw->conf.radar_enabled) {
		/* monitor mode on a radar channel still needs detection */
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	/* can't beacon yet: the CAC period has not completed */
	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1936