xref: /linux/drivers/net/wireless/mediatek/mt76/mac80211.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
/* Initializer helpers for struct ieee80211_channel table entries.
 * _idx is the IEEE channel number (hw_value), _freq the center frequency
 * in MHz.  All channels advertise a 30 dBm max_power cap; the effective
 * limit is further reduced by regulatory rules at runtime.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29 
/* Full 2.4 GHz channel list (1-14); per-country availability is trimmed
 * by the regulatory framework.
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46 
/* 5 GHz channel list, grouped by sub-band (UNII-1, UNII-2, UNII-2e,
 * UNII-3/4 including the 5.9 GHz extension channels 169-177).
 */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80 
/* 6 GHz channel list: all 20 MHz PSC/non-PSC channels across UNII-5..8 */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146 
/* LED blink rate table for the mac80211 throughput trigger: throughput
 * thresholds in kbit/s mapped to blink intervals in ms (faster blinking
 * at higher throughput).
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159 
/* Default legacy bitrate table shared by mt76 drivers: four CCK rates
 * (1/2/5.5/11 Mbit/s) followed by eight OFDM rates (6..54 Mbit/s).
 * The first argument is the hardware rate index, the second the rate in
 * units of 100 kbit/s.  5/6 GHz bands skip the four CCK entries by
 * registering with rates + 4 (see mt76_register_device()).
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175 
/* Frequency ranges (MHz) for which userspace may specify per-range SAR
 * power limits; advertised to nl80211 via mt76_sar_capa below.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};

/* SAR capability descriptor hooked into wiphy->sar_capa in mt76_phy_init() */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195 
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 	struct device_node *np = dev->dev->of_node;
201 
202 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 		return 0;
204 
205 	np = of_get_child_by_name(np, "led");
206 	if (np) {
207 		if (!of_device_is_available(np)) {
208 			of_node_put(np);
209 			dev_info(dev->dev,
210 				"led registration was explicitly disabled by dts\n");
211 			return 0;
212 		}
213 
214 		if (phy == &dev->phy) {
215 			int led_pin;
216 
217 			if (!of_property_read_u32(np, "led-sources", &led_pin))
218 				phy->leds.pin = led_pin;
219 
220 			phy->leds.al =
221 				of_property_read_bool(np, "led-active-low");
222 		}
223 
224 		of_node_put(np);
225 	}
226 
227 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 		 wiphy_name(hw->wiphy));
229 
230 	phy->leds.cdev.name = phy->leds.name;
231 	phy->leds.cdev.default_trigger =
232 		ieee80211_create_tpt_led_trigger(hw,
233 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 					mt76_tpt_blink,
235 					ARRAY_SIZE(mt76_tpt_blink));
236 
237 	dev_info(dev->dev,
238 		"registering led '%s'\n", phy->leds.name);
239 
240 	return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242 
243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 		return;
247 
248 	led_classdev_unregister(&phy->leds.cdev);
249 }
250 
/* Update HT (and optionally VHT) capabilities of a band to match the
 * number of spatial streams implied by phy->antenna_mask.  Called both
 * at sband setup time and whenever the antenna configuration changes.
 */
static void
mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	/* TX STBC needs at least two chains */
	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* Advertise MCS 0-7 per available stream, clear the rest */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* Build the VHT MCS map: 2 bits per stream, MCS 0-9 for streams
	 * we have, "not supported" for the rest
	 */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292 
293 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
294 {
295 	if (phy->cap.has_2ghz)
296 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
297 	if (phy->cap.has_5ghz)
298 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
299 	if (phy->cap.has_6ghz)
300 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
301 }
302 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303 
/* Populate one ieee80211_supported_band: duplicate the channel template
 * (it will be modified by regulatory updates), allocate the matching
 * per-channel state array, and fill in HT/VHT capabilities as requested.
 * Both allocations are devm-managed, so there is no explicit cleanup.
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* Writable copy of the const channel template */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	/* Per-channel survey/airtime state, one entry per channel */
	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Stream-count dependent bits (MCS masks, STBC) */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360 
361 static int
362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369 			       n_rates, true, false);
370 }
371 
372 static int
373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates, bool vht)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380 			       n_rates, true, vht);
381 }
382 
383 static int
384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 		   int n_rates)
386 {
387 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388 
389 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391 			       n_rates, false, false);
392 }
393 
394 static void
395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 		 enum nl80211_band band)
397 {
398 	struct ieee80211_supported_band *sband = &msband->sband;
399 	bool found = false;
400 	int i;
401 
402 	if (!sband)
403 		return;
404 
405 	for (i = 0; i < sband->n_channels; i++) {
406 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 			continue;
408 
409 		found = true;
410 		break;
411 	}
412 
413 	if (found) {
414 		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
415 					NL80211_CHAN_HT20);
416 		phy->chan_state = &msband->chan[0];
417 		phy->dev->band_phys[band] = phy;
418 		return;
419 	}
420 
421 	sband->n_channels = 0;
422 	if (phy->hw->wiphy->bands[band] == sband)
423 		phy->hw->wiphy->bands[band] = NULL;
424 }
425 
/* Common phy/hw initialization shared by mt76_register_device() and
 * mt76_register_phy().  Per-phy state is always initialized; the
 * wiphy/hw-wide configuration is only applied for the phy that owns the
 * ieee80211_hw (hw->priv), so secondary radio phys sharing a hw skip it.
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	/* hw-wide setup below runs only once, for the primary phy */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* Default the advertised antennas to the phy's mask unless the
	 * driver already set them
	 */
	if (!wiphy->available_antennas_tx)
		wiphy->available_antennas_tx = phy->antenna_mask;
	if (!wiphy->available_antennas_rx)
		wiphy->available_antennas_rx = phy->antenna_mask;

	/* Per-range SAR power limit storage, one entry per SAR range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* Software A-MSDU aggregation only when the hardware does not
	 * offload it and fragment lists are usable
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
491 
492 struct mt76_phy *
493 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
494 		     u8 band_idx)
495 {
496 	struct ieee80211_hw *hw = dev->phy.hw;
497 	unsigned int phy_size;
498 	struct mt76_phy *phy;
499 
500 	phy_size = ALIGN(sizeof(*phy), 8);
501 	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
502 	if (!phy)
503 		return NULL;
504 
505 	phy->dev = dev;
506 	phy->hw = hw;
507 	phy->priv = (void *)phy + phy_size;
508 	phy->band_idx = band_idx;
509 
510 	return phy;
511 }
512 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
513 
/* Allocate an extra phy with its own ieee80211_hw (multi-hw devices).
 * Driver-private data of "size" bytes follows the aligned phy struct in
 * hw->priv.  Returns the new phy or NULL on allocation failure; the
 * caller registers it with mt76_register_phy().
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	/* Same interface mode set as the primary hw (see
	 * mt76_alloc_device())
	 */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
547 
/* Register a non-primary phy: initialize it, build its supported bands,
 * optionally register a LED, then register the hw with mac80211 (only
 * when this phy owns the hw; radio phys share an already/soon registered
 * hw).  For 5/6 GHz the first four (CCK) entries of the rate table are
 * skipped.  Returns 0 on success or a negative error code.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* Skip the CCK rates, which are 2.4 GHz only */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* Apply dts frequency limits, then drop fully-disabled bands */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* Only the phy owning the hw registers it with mac80211 */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
598 
/* Tear down a phy registered via mt76_register_phy(): LED cleanup,
 * flush pending tx status, unregister the hw and drop the phys[] slot.
 * Safe to call on a phy that never completed registration.
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* flush=true: complete all pending tx status reports */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
613 
/* Create a page_pool for a queue's rx buffers.  Only rx queues and the
 * WED tx-free queue get a pool; other queues return 0 without one.
 * Main/band/NPU rx queues get a larger pool than auxiliary queues, and
 * MMIO devices delegate DMA mapping and sync to the page_pool itself.
 * Returns 0 on success or a negative error from page_pool_create().
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* Index into dev->q_rx for rx queues; -1 otherwise */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
	case MT_RXQ_NPU0:
	case MT_RXQ_NPU1:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* Clear the error-encoded pointer so later teardown
		 * does not try to destroy it
		 */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
664 
/* Allocate the primary mt76_dev together with its ieee80211_hw and
 * embedded primary phy (band 0), and initialize all generic state:
 * locks, wait queues, MCU queues, token IDRs, wcid/poll lists, rx skb
 * queues and the ordered workqueue.  "size" is the driver-private size
 * passed through to ieee80211_alloc_hw().  Returns NULL on failure
 * (hw allocation or workqueue creation).
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* dma_dev defaults to the bus device; drivers may override */
	dev->dma_dev = pdev;

	/* The primary phy is embedded in the dev and owns band 0 */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
742 
/* Register the primary phy/hw with mac80211: initialize the global
 * wcid, the phy, and the supported bands (5/6 GHz skip the four CCK
 * rate entries), apply dts frequency limits, set up the LED, register
 * the hw and start the tx worker.  Returns 0 on success or a negative
 * error code.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* Skip the CCK rates, which are 2.4 GHz only */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply dts frequency limits, then drop fully-disabled bands */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	/* Keep tx latency low without starving normal tasks */
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
796 
/* Undo mt76_register_device(): LED cleanup, flush pending tx status,
 * release the global wcid and unregister the hw.  Safe to call if
 * registration never completed.
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* flush=true: complete all pending tx status reports */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
811 
/* Final teardown counterpart of mt76_alloc_device(): stop the tx
 * worker, destroy the workqueue and NPU state, then free the hw (which
 * also frees dev itself, so dev must not be touched afterwards).
 */
void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	mt76_npu_deinit(dev);
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);
823 
824 static void mt76_reset_phy(struct mt76_phy *phy)
825 {
826 	if (!phy)
827 		return;
828 
829 	INIT_LIST_HEAD(&phy->tx_list);
830 	phy->num_sta = 0;
831 	phy->chanctx = NULL;
832 	mt76_roc_complete(phy);
833 }
834 
/* Reset device-wide station/vif state after a firmware/hardware reset:
 * drop every wcid entry, clear the bookkeeping lists and masks, and
 * reset each phy.  Callers are expected to re-add stations afterwards.
 */
void mt76_reset_device(struct mt76_dev *dev)
{
	int i;

	/* RCU read section while walking the wcid pointer array */
	rcu_read_lock();
	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[i]);
		if (!wcid)
			continue;

		wcid->sta = 0;
		mt76_wcid_cleanup(dev, wcid);
		rcu_assign_pointer(dev->wcid[i], NULL);
	}
	rcu_read_unlock();

	/* Re-init rather than walk the lists: entries are stale */
	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	dev->vif_mask = 0;
	memset(dev->wcid_mask, 0, sizeof(dev->wcid_mask));

	mt76_reset_phy(&dev->phy);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
		mt76_reset_phy(dev->phys[i]);
}
EXPORT_SYMBOL_GPL(mt76_reset_device);
863 
864 struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
865 			      struct ieee80211_vif *vif)
866 {
867 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
868 	struct mt76_chanctx *ctx;
869 
870 	if (!hw->wiphy->n_radio)
871 		return hw->priv;
872 
873 	if (!mlink->ctx)
874 		return NULL;
875 
876 	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
877 	return ctx->phy;
878 }
879 EXPORT_SYMBOL_GPL(mt76_vif_phy);
880 
/* Finalize the A-MSDU currently being assembled on rx queue q: detach
 * it from the per-queue assembly state, sanity-check its first
 * subframe, and either drop it or hand it to the rx skb queue for
 * delivery to mac80211.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* Decrypted frames with the IV still present
			 * carry 8 extra bytes before the payload
			 */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP header where the DA should be -> spoofed
		 * A-MSDU, drop the whole frame
		 */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
916 
/* Collect consecutive A-MSDU subframes into a single skb frag_list.
 * A pending A-MSDU is flushed when a non-A-MSDU frame, a new first
 * subframe, or a different sequence number arrives; the current one is
 * flushed as soon as its last subframe (or a standalone frame) is seen.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* New burst boundary: release whatever was being assembled */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new assembly; tail points at the frag_list
		 * slot where the next subframe will be linked
		 */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
939 
/* Driver rx entry point: drop frames while the phy is not running,
 * account testmode rx statistics, and feed the frame into the A-MSDU
 * burst assembly for queue q.  status->phy_idx selects the target phy.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* Testmode rx counters, including FCS error tracking */
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
961 
962 bool mt76_has_tx_pending(struct mt76_phy *phy)
963 {
964 	struct mt76_queue *q;
965 	int i;
966 
967 	for (i = 0; i < __MT_TXQ_MAX; i++) {
968 		q = phy->q_tx[i];
969 		if (q && q->queued)
970 			return true;
971 	}
972 
973 	return false;
974 }
975 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
976 
977 static struct mt76_channel_state *
978 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
979 {
980 	struct mt76_sband *msband;
981 	int idx;
982 
983 	if (c->band == NL80211_BAND_2GHZ)
984 		msband = &phy->sband_2g;
985 	else if (c->band == NL80211_BAND_6GHZ)
986 		msband = &phy->sband_6g;
987 	else
988 		msband = &phy->sband_5g;
989 
990 	idx = c - &msband->sband.channels[0];
991 	return &msband->chan[idx];
992 }
993 
994 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
995 {
996 	struct mt76_channel_state *state = phy->chan_state;
997 
998 	state->cc_active += ktime_to_us(ktime_sub(time,
999 						  phy->survey_time));
1000 	phy->survey_time = time;
1001 }
1002 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
1003 
/* Refresh the survey counters for the current channel: let the driver
 * pull hardware counters, account active time, and for drivers doing
 * software rx airtime accounting fold the accumulated BSS rx time into
 * the channel state under cc_lock.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock protects the shared software rx counter */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
1025 
/* Switch the phy to a new channel (caller holds dev->mutex): pause the
 * tx worker, drain pending tx (bounded wait), snapshot survey data,
 * update chandef/DFS/offchannel state and invoke the driver's
 * set_channel callback, then resume tx.  Returns the driver's result.
 */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	/* Signal in-progress reconfiguration to the rest of the driver */
	set_bit(MT76_RESET, &phy->state);

	mt76_worker_disable(&dev->tx_worker);
	/* Best effort: give queued frames up to 200ms to drain */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* Frequency or bandwidth change invalidates the DFS CAC state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	/* Only on-channel switches update the operating chandef */
	if (!offchannel)
		phy->main_chandef = *chandef;

	/* Moving away from the operating channel: restart its stats */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
1061 
1062 int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
1063 		     bool offchannel)
1064 {
1065 	struct mt76_dev *dev = phy->dev;
1066 	int ret;
1067 
1068 	cancel_delayed_work_sync(&phy->mac_work);
1069 
1070 	mutex_lock(&dev->mutex);
1071 	ret = __mt76_set_channel(phy, chandef, offchannel);
1072 	mutex_unlock(&dev->mutex);
1073 
1074 	return ret;
1075 }
1076 
1077 int mt76_update_channel(struct mt76_phy *phy)
1078 {
1079 	struct ieee80211_hw *hw = phy->hw;
1080 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1081 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1082 
1083 	phy->radar_enabled = hw->conf.radar_enabled;
1084 
1085 	return mt76_set_channel(phy, chandef, offchannel);
1086 }
1087 EXPORT_SYMBOL_GPL(mt76_update_channel);
1088 
1089 static struct mt76_sband *
1090 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1091 {
1092 	if (*idx < phy->sband_2g.sband.n_channels)
1093 		return &phy->sband_2g;
1094 
1095 	*idx -= phy->sband_2g.sband.n_channels;
1096 	if (*idx < phy->sband_5g.sband.n_channels)
1097 		return &phy->sband_5g;
1098 
1099 	*idx -= phy->sband_5g.sband.n_channels;
1100 	if (*idx < phy->sband_6g.sband.n_channels)
1101 		return &phy->sband_6g;
1102 
1103 	*idx -= phy->sband_6g.sband.n_channels;
1104 	return NULL;
1105 }
1106 
/* mac80211 .get_survey callback: resolve the flat channel index idx
 * across all phys attached to this hw, refresh hardware counters when
 * starting a new walk (idx == 0), and fill in the survey entry for the
 * selected channel.  Returns 0 or -ENOENT when idx is out of range.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband = NULL;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int phy_idx = 0;
	int ret = 0;

	mutex_lock(&dev->mutex);

	/* Find the phy/band containing idx; for multi-radio wiphys keep
	 * scanning subsequent phys, rebasing idx as we go
	 */
	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
		sband = NULL;
		phy = dev->phys[phy_idx];
		if (!phy || phy->hw != hw)
			continue;

		sband = mt76_get_survey_sband(phy, &idx);

		/* First index of a phy: refresh its hardware counters */
		if (idx == 0 && phy->dev->drv->update_survey)
			mt76_update_survey(phy);

		if (sband || !hw->wiphy->n_radio)
			break;
	}

	if (!sband) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* Counters are kept in microseconds; survey wants milliseconds */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_lock protects counters updated from the tx/rx paths */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1173 
/* Seed the software CCMP replay counters for @wcid from @key's current
 * RX PN state.  Only CCMP enables PN checking; a NULL key or any other
 * cipher leaves rx_check_pn disabled.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	/* i == IEEE80211_NUM_TIDS after the loop: this writes the extra
	 * rx_key_pn slot that mt76_check_ccmp_pn() uses for individually
	 * addressed robust management frames (tid -1 in mac80211 terms).
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1202 
1203 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1204 {
1205 	int signal = -128;
1206 	u8 chains;
1207 
1208 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1209 		int cur, diff;
1210 
1211 		cur = *chain_signal;
1212 		if (!(chains & BIT(0)) ||
1213 		    cur > 0)
1214 			continue;
1215 
1216 		if (cur > signal)
1217 			swap(cur, signal);
1218 
1219 		diff = signal - cur;
1220 		if (diff == 0)
1221 			signal += 3;
1222 		else if (diff <= 2)
1223 			signal += 2;
1224 		else if (diff <= 6)
1225 			signal += 1;
1226 	}
1227 
1228 	return signal;
1229 }
1230 EXPORT_SYMBOL(mt76_rx_signal);
1231 
1232 static void
1233 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1234 		struct ieee80211_hw **hw,
1235 		struct ieee80211_sta **sta)
1236 {
1237 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1238 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1239 	struct mt76_rx_status mstat;
1240 
1241 	mstat = *((struct mt76_rx_status *)skb->cb);
1242 	memset(status, 0, sizeof(*status));
1243 
1244 	skb->priority = mstat.qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1245 
1246 	status->flag = mstat.flag;
1247 	status->freq = mstat.freq;
1248 	status->enc_flags = mstat.enc_flags;
1249 	status->encoding = mstat.encoding;
1250 	status->bw = mstat.bw;
1251 	if (status->encoding == RX_ENC_EHT) {
1252 		status->eht.ru = mstat.eht.ru;
1253 		status->eht.gi = mstat.eht.gi;
1254 	} else {
1255 		status->he_ru = mstat.he_ru;
1256 		status->he_gi = mstat.he_gi;
1257 		status->he_dcm = mstat.he_dcm;
1258 	}
1259 	status->rate_idx = mstat.rate_idx;
1260 	status->nss = mstat.nss;
1261 	status->band = mstat.band;
1262 	status->signal = mstat.signal;
1263 	status->chains = mstat.chains;
1264 	status->ampdu_reference = mstat.ampdu_ref;
1265 	status->device_timestamp = mstat.timestamp;
1266 	status->mactime = mstat.timestamp;
1267 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1268 	if (status->signal <= -128)
1269 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1270 
1271 	if (ieee80211_is_beacon(hdr->frame_control) ||
1272 	    ieee80211_is_probe_resp(hdr->frame_control))
1273 		status->boottime_ns = ktime_get_boottime_ns();
1274 
1275 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1276 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1277 		     sizeof(mstat.chain_signal));
1278 	memcpy(status->chain_signal, mstat.chain_signal,
1279 	       sizeof(mstat.chain_signal));
1280 
1281 	if (mstat.wcid) {
1282 		status->link_valid = mstat.wcid->link_valid;
1283 		status->link_id = mstat.wcid->link_id;
1284 	}
1285 
1286 	*sta = wcid_to_sta(mstat.wcid);
1287 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1288 }
1289 
/* Software replay (PN) check for CCMP-decrypted RX frames.  Frames
 * whose PN does not strictly increase are flagged RX_FLAG_ONLY_MONITOR
 * so mac80211 keeps them out of the data path.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	/* Only hardware-decrypted frames carry a PN to validate */
	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	/* Already demoted to monitor-only: nothing left to check */
	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	/* PN checking is armed per-wcid by mt76_wcid_key_setup() */
	if (!wcid || !wcid->rx_check_pn)
		return;

	/* One replay counter per TID */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* memcmp works as a numeric PN comparison because the PN bytes
	 * are stored most-significant first (see mt76_insert_ccmp_hdr()).
	 */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		/* PN did not increase: treat as replay */
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1347 
/* Charge @len bytes of received airtime to the channel-state counters
 * and, when the frame belongs to a known station, to that station's
 * mac80211 airtime accounting.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	/* Minimal rx-status copy: only the fields the airtime
	 * calculation reads.
	 */
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	/* No station context: channel-level accounting only */
	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
1376 
1377 static void
1378 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1379 {
1380 	struct mt76_wcid *wcid;
1381 	int wcid_idx;
1382 
1383 	if (!dev->rx_ampdu_len)
1384 		return;
1385 
1386 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1387 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1388 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1389 	else
1390 		wcid = NULL;
1391 	dev->rx_ampdu_status.wcid = wcid;
1392 
1393 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1394 
1395 	dev->rx_ampdu_len = 0;
1396 	dev->rx_ampdu_ref = 0;
1397 }
1398 
/* Per-frame airtime accounting hook for the RX path.
 *
 * Subframes of one A-MPDU are accumulated under rx_ampdu_ref and
 * reported as a single aggregate once it ends; standalone frames are
 * reported immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	/* Only drivers relying on software airtime tracking need this */
	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* Count unassociated traffic only when addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* A non-aggregated frame, or a frame from a different aggregate,
	 * terminates any A-MPDU accumulation still in flight.
	 */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* First subframe: snapshot status for the later report */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1438 
/* Per-frame station bookkeeping on the RX path: resolve the wcid for
 * PS-poll frames, update airtime/RSSI statistics, and track the
 * sender's power-save state for drivers that opted in via
 * MT_WCID_FLAG_CHECK_PS.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-poll frames may arrive without a resolved wcid; look the
	 * sender up by its transmitter address.
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* RSSI EWMA stores the magnitude of the (non-positive) signal */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* Only complete mgmt/data frames carry a meaningful PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* No change in power-save state: nothing to do */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* The PS flag is set before the driver callback on entry to PS
	 * and cleared after it on exit, so the flag stays set while the
	 * callback runs in both directions.
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1506 
/* Deliver a batch of fully-processed RX frames to mac80211.  A-MSDU
 * subframes chained on frag_list are unlinked and delivered as
 * individual frames.  With a NAPI context the result goes through GRO;
 * otherwise it is handed straight to the network stack.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* The replay check reads the mt76 status in skb->cb, so
		 * it must run before mt76_rx_convert() overwrites it.
		 */
		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1546 
1547 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1548 			   struct napi_struct *napi)
1549 {
1550 	struct sk_buff_head frames;
1551 	struct sk_buff *skb;
1552 
1553 	__skb_queue_head_init(&frames);
1554 
1555 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1556 		mt76_check_sta(dev, skb);
1557 		if (mtk_wed_device_active(&dev->mmio.wed) ||
1558 		    mt76_npu_device_active(dev))
1559 			__skb_queue_tail(&frames, skb);
1560 		else
1561 			mt76_rx_aggr_reorder(skb, &frames);
1562 	}
1563 
1564 	mt76_rx_complete(dev, &frames, napi);
1565 }
1566 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1567 
/* Handle the NOTEXIST -> NONE station transition: allocate driver
 * state via drv->sta_add, point the station's TXQs at its wcid slot
 * and publish the wcid for RCU lookup.  Takes dev->mutex.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* Point every mac80211 TXQ of this station at its wcid index */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	/* Make the wcid visible to RCU readers (RX/status paths) */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1603 
/* Tear down driver state for a departing station.  Callers hold
 * dev->mutex (see mt76_sta_remove()); the RCU-visible dev->wcid
 * pointer is cleared separately in mt76_sta_pre_rcu_remove().
 */
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	/* Stop every RX aggregation session owned by this station */
	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	/* Free any tx-status entries / queued frames still on the wcid */
	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1623 
1624 static void
1625 mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
1626 		struct ieee80211_sta *sta)
1627 {
1628 	struct mt76_dev *dev = phy->dev;
1629 
1630 	mutex_lock(&dev->mutex);
1631 	__mt76_sta_remove(phy, vif, sta);
1632 	mutex_unlock(&dev->mutex);
1633 }
1634 
/* ieee80211_ops::sta_state implementation: create/destroy the driver
 * station on the NOTEXIST<->NONE transitions and forward the
 * assoc/authorize/disassoc transitions to drivers providing sta_event.
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	enum mt76_sta_event ev;

	/* Use the phy the vif is actually attached to, not hw->priv */
	phy = mt76_vif_phy(hw, vif);
	if (!phy)
		return -EINVAL;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(phy, vif, sta);

	if (!dev->drv->sta_event)
		return 0;

	/* Map the remaining mac80211 transitions to driver events */
	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		ev = MT76_STA_EVENT_ASSOC;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTHORIZED)
		ev = MT76_STA_EVENT_AUTHORIZE;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTH)
		ev = MT76_STA_EVENT_DISASSOC;
	else
		return 0;

	return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
1674 
/* Called by mac80211 before the station becomes unreachable via RCU:
 * unpublish the wcid pointer, holding both dev->mutex and the tx
 * status lock across the clear so lookups stop returning it.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1689 
1690 void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
1691 {
1692 	wcid->hw_key_idx = -1;
1693 	wcid->phy_idx = band_idx;
1694 
1695 	INIT_LIST_HEAD(&wcid->tx_list);
1696 	skb_queue_head_init(&wcid->tx_pending);
1697 	skb_queue_head_init(&wcid->tx_offchannel);
1698 
1699 	INIT_LIST_HEAD(&wcid->list);
1700 	idr_init(&wcid->pktid);
1701 
1702 	INIT_LIST_HEAD(&wcid->poll_list);
1703 }
1704 EXPORT_SYMBOL_GPL(mt76_wcid_init);
1705 
/* Release every skb still associated with @wcid: outstanding tx-status
 * entries plus anything queued on tx_pending/tx_offchannel.  Frames
 * are collected onto a local list under the relevant locks and freed
 * afterwards with no locks held.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* Collect outstanding tx-status frames (pktid -1: all entries) */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	/* Unlink the wcid from the phy's tx scheduling list */
	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* Splice queued frames onto the local list under each queue lock */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_lock(&wcid->tx_offchannel.lock);
	skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
	spin_unlock(&wcid->tx_offchannel.lock);

	spin_unlock_bh(&phy->tx_lock);

	/* Free everything lock-free */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1740 
1741 void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
1742 {
1743 	if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
1744 		return;
1745 
1746 	spin_lock_bh(&dev->sta_poll_lock);
1747 	if (list_empty(&wcid->poll_list))
1748 		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
1749 	spin_unlock_bh(&dev->sta_poll_lock);
1750 }
1751 EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1752 
/* Convert a per-interface txpower limit into the device's internal
 * units (0.5 dB steps, given the *2 scaling), applying SAR limits and
 * subtracting the multi-chain combining delta.  Note: intermediate
 * results are deliberately stored back into the s8 argument.
 */
s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
{
	int n_chains = hweight16(phy->chainmask);

	txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
	txpower -= mt76_tx_power_path_delta(n_chains);

	return txpower;
}
EXPORT_SYMBOL_GPL(mt76_get_power_bound);
1763 
1764 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1765 		     unsigned int link_id, int *dbm)
1766 {
1767 	struct mt76_phy *phy = mt76_vif_phy(hw, vif);
1768 	int n_chains, delta;
1769 
1770 	if (!phy)
1771 		return -EINVAL;
1772 
1773 	n_chains = hweight16(phy->chainmask);
1774 	delta = mt76_tx_power_path_delta(n_chains);
1775 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1776 
1777 	return 0;
1778 }
1779 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1780 
1781 int mt76_init_sar_power(struct ieee80211_hw *hw,
1782 			const struct cfg80211_sar_specs *sar)
1783 {
1784 	struct mt76_phy *phy = hw->priv;
1785 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1786 	int i;
1787 
1788 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1789 		return -EINVAL;
1790 
1791 	for (i = 0; i < sar->num_sub_specs; i++) {
1792 		u32 index = sar->sub_specs[i].freq_range_index;
1793 		/* SAR specifies power limitaton in 0.25dbm */
1794 		s32 power = sar->sub_specs[i].power >> 1;
1795 
1796 		if (power > 127 || power < -127)
1797 			power = 127;
1798 
1799 		phy->frp[index].range = &capa->freq_ranges[index];
1800 		phy->frp[index].power = power;
1801 	}
1802 
1803 	return 0;
1804 }
1805 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1806 
1807 int mt76_get_sar_power(struct mt76_phy *phy,
1808 		       struct ieee80211_channel *chan,
1809 		       int power)
1810 {
1811 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1812 	int freq, i;
1813 
1814 	if (!capa || !phy->frp)
1815 		return power;
1816 
1817 	if (power > 127 || power < -127)
1818 		power = 127;
1819 
1820 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1821 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1822 		if (phy->frp[i].range &&
1823 		    freq >= phy->frp[i].range->start_freq &&
1824 		    freq < phy->frp[i].range->end_freq) {
1825 			power = min_t(int, phy->frp[i].power, power);
1826 			break;
1827 		}
1828 	}
1829 
1830 	return power;
1831 }
1832 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1833 
1834 static void
1835 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1836 {
1837 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1838 		ieee80211_csa_finish(vif, 0);
1839 }
1840 
/* Finalize a pending channel switch: once the beacon countdown has
 * completed (csa_complete latched by mt76_csa_check()), notify
 * mac80211 on every matching active interface.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1853 
1854 static void
1855 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1856 {
1857 	struct mt76_dev *dev = priv;
1858 
1859 	if (!vif->bss_conf.csa_active)
1860 		return;
1861 
1862 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1863 }
1864 
/* Poll all active interfaces for a completed CSA beacon countdown;
 * the result is consumed later by mt76_csa_finish().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1872 
/* ieee80211_ops::set_tim stub: always succeeds.  Providing the
 * callback is what matters here — NOTE(review): the TIM content itself
 * appears to be handled elsewhere (e.g. on beacon refresh); confirm
 * per driver.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1879 
/* Re-insert the 8-byte CCMP header (PN + key id) that the hardware
 * stripped during decryption, so mac80211 sees a well-formed frame.
 * status->iv holds the PN with the most significant byte first.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* Open an 8-byte gap between the 802.11 header and the payload */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0 PN1 rsvd keyid/ExtIV PN2 PN3 PN4 PN5 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);	/* ExtIV set, key id in bits 6-7 */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	/* The IV is present again, so drop the stripped flag */
	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1902 
1903 int mt76_get_rate(struct mt76_dev *dev,
1904 		  struct ieee80211_supported_band *sband,
1905 		  int idx, bool cck)
1906 {
1907 	bool is_2g = sband->band == NL80211_BAND_2GHZ;
1908 	int i, offset = 0, len = sband->n_bitrates;
1909 
1910 	if (cck) {
1911 		if (!is_2g)
1912 			return 0;
1913 
1914 		idx &= ~BIT(2); /* short preamble */
1915 	} else if (is_2g) {
1916 		offset = 4;
1917 	}
1918 
1919 	for (i = offset; i < len; i++) {
1920 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1921 			return i;
1922 	}
1923 
1924 	return 0;
1925 }
1926 EXPORT_SYMBOL_GPL(mt76_get_rate);
1927 
/* ieee80211_ops::sw_scan_start: flag the phy as scanning so other
 * paths (e.g. mt76_phy_dfs_state()) can back off while it runs.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1936 
/* ieee80211_ops::sw_scan_complete: clear the scanning flag set by
 * mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1944 
1945 int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
1946 		     u32 *rx_ant)
1947 {
1948 	struct mt76_phy *phy = hw->priv;
1949 	struct mt76_dev *dev = phy->dev;
1950 	int i;
1951 
1952 	mutex_lock(&dev->mutex);
1953 	*tx_ant = 0;
1954 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
1955 		if (dev->phys[i] && dev->phys[i]->hw == hw)
1956 			*tx_ant |= dev->phys[i]->chainmask;
1957 	*rx_ant = *tx_ant;
1958 	mutex_unlock(&dev->mutex);
1959 
1960 	return 0;
1961 }
1962 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1963 
1964 struct mt76_queue *
1965 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1966 		int ring_base, void *wed, u32 flags)
1967 {
1968 	struct mt76_queue *hwq;
1969 	int err;
1970 
1971 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1972 	if (!hwq)
1973 		return ERR_PTR(-ENOMEM);
1974 
1975 	hwq->flags = flags;
1976 	hwq->wed = wed;
1977 
1978 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1979 	if (err < 0)
1980 		return ERR_PTR(err);
1981 
1982 	return hwq;
1983 }
1984 EXPORT_SYMBOL_GPL(mt76_init_queue);
1985 
/* Accumulate one station's tx rate statistics into the ethtool data
 * array.  The write order here must stay in sync with the stats string
 * table of the calling driver.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* Without EHT, skip the last bandwidth bucket (presumably the
	 * EHT-only 320 MHz entry — confirm against mt76_sta_stats).
	 */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 12 MCS entries without EHT, 14 with */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	/* Report how many slots this worker consumed */
	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
2021 
/* Append aggregated page-pool statistics for all RX queues to the
 * ethtool data array and advance *index past the added entries.
 * Compiles to a no-op without CONFIG_PAGE_POOL_STATS.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
2036 
/* Derive the current DFS state for this phy:
 * - DISABLED when no DFS region is configured or a scan is running
 * - ACTIVE in monitor mode on a radar channel, or once beaconing is
 *   permitted on the configured channel
 * - CAC while radar detection is enabled but beaconing is not yet
 *   allowed (channel availability check still in progress)
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!phy->radar_enabled) {
		/* Monitor mode on a radar channel still needs detection */
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
2060 
/* Common vif teardown: clear the primary link slot and abort any scan
 * or remain-on-channel work that may still reference this vif.
 */
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	rcu_assign_pointer(mvif->link[0], NULL);
	mt76_abort_scan(dev);
	if (mvif->roc_phy)
		mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2072 
/* Pick up to @max_active_links usable links for an MLD interface,
 * preferring links on different bands.  Returns 0 for non-MLD
 * interfaces; returns the current active set unchanged when every
 * usable link is already active.
 */
u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links)
{
	unsigned long usable_links = ieee80211_vif_usable_links(vif);
	struct  {
		u8 link_id;
		enum nl80211_band band;
	} data[IEEE80211_MLD_MAX_NUM_LINKS];
	unsigned int link_id;
	int i, n_data = 0;
	u16 sel_links = 0;

	if (!ieee80211_vif_is_mld(vif))
		return 0;

	if (vif->active_links == usable_links)
		return vif->active_links;

	/* Collect a (link_id, band) pair for every usable link */
	rcu_read_lock();
	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf;

		link_conf = rcu_dereference(vif->link_conf[link_id]);
		if (WARN_ON_ONCE(!link_conf))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].band = link_conf->chanreq.oper.chan->band;
		n_data++;
	}
	rcu_read_unlock();

	/* Seed the selection with the first currently-active link, then
	 * add links on other bands until max_active_links is reached.
	 * The unconditional break ends the outer loop after one seed.
	 */
	for (i = 0; i < n_data; i++) {
		int j;

		if (!(BIT(data[i].link_id) & vif->active_links))
			continue;

		sel_links = BIT(data[i].link_id);
		for (j = 0; j < n_data; j++) {
			if (data[i].band != data[j].band) {
				sel_links |= BIT(data[j].link_id);
				if (hweight16(sel_links) == max_active_links)
					break;
			}
		}
		break;
	}

	return sel_links;
}
EXPORT_SYMBOL_GPL(mt76_select_links);
2124