xref: /linux/drivers/net/wireless/mediatek/mt76/mac80211.c (revision 6af91e3d2cfc8bb579b1aa2d22cd91f8c34acdf6)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
/* Initializers for ieee80211_channel table entries. hw_value carries the
 * 802.11 channel number; max_power is a 30 dBm default that regulatory
 * rules may later reduce.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29 
/* 2.4 GHz channel template: channels 1-14 (frequencies in MHz). */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46 
/* 5 GHz channel template; the blank-line groups follow the UNII sub-bands. */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* UNII-1 */
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	/* UNII-2A */
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	/* UNII-2C */
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	/* UNII-3/4 */
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80 
/* 6 GHz channel template: channels 1-233 in 20 MHz steps of 4, grouped by
 * UNII sub-band.
 */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146 
/* LED throughput trigger table for ieee80211_create_tpt_led_trigger():
 * throughput thresholds in Kbps (N * 1024 == N Mbit/s) mapped to blink
 * intervals in ms - higher traffic, faster blinking.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159 
/* Shared legacy bitrate table: 4 CCK rates followed by 8 OFDM rates.
 * The second macro argument is the bitrate in 100 kbit/s units
 * (e.g. 10 == 1 Mbit/s); the first is presumably the hardware rate
 * index encoded by CCK_RATE()/OFDM_RATE() in mt76.h - TODO confirm.
 * Band init for 5/6 GHz skips the first 4 (CCK) entries.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175 
/* Frequency ranges (MHz) over which userspace may set per-range SAR power
 * limits; together they cover the 2.4, 5 and 6 GHz bands.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189 
/* SAR capability advertised via wiphy->sar_capa in mt76_phy_init(). */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195 
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 	struct device_node *np = dev->dev->of_node;
201 
202 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 		return 0;
204 
205 	np = of_get_child_by_name(np, "led");
206 	if (np) {
207 		if (!of_device_is_available(np)) {
208 			of_node_put(np);
209 			dev_info(dev->dev,
210 				"led registration was explicitly disabled by dts\n");
211 			return 0;
212 		}
213 
214 		if (phy == &dev->phy) {
215 			int led_pin;
216 
217 			if (!of_property_read_u32(np, "led-sources", &led_pin))
218 				phy->leds.pin = led_pin;
219 
220 			phy->leds.al =
221 				of_property_read_bool(np, "led-active-low");
222 		}
223 
224 		of_node_put(np);
225 	}
226 
227 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 		 wiphy_name(hw->wiphy));
229 
230 	phy->leds.cdev.name = phy->leds.name;
231 	phy->leds.cdev.default_trigger =
232 		ieee80211_create_tpt_led_trigger(hw,
233 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 					mt76_tpt_blink,
235 					ARRAY_SIZE(mt76_tpt_blink));
236 
237 	dev_info(dev->dev,
238 		"registering led '%s'\n", phy->leds.name);
239 
240 	return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242 
243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 		return;
247 
248 	led_classdev_unregister(&phy->leds.cdev);
249 }
250 
251 static void mt76_init_stream_cap(struct mt76_phy *phy,
252 				 struct ieee80211_supported_band *sband,
253 				 bool vht)
254 {
255 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
256 	int i, nstream = hweight8(phy->antenna_mask);
257 	struct ieee80211_sta_vht_cap *vht_cap;
258 	u16 mcs_map = 0;
259 
260 	if (nstream > 1)
261 		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
262 	else
263 		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
264 
265 	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
266 		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
267 
268 	if (!vht)
269 		return;
270 
271 	vht_cap = &sband->vht_cap;
272 	if (nstream > 1)
273 		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
274 	else
275 		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
276 	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
277 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
278 
279 	for (i = 0; i < 8; i++) {
280 		if (i < nstream)
281 			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
282 		else
283 			mcs_map |=
284 				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
285 	}
286 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
287 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
288 	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
289 		vht_cap->vht_mcs.tx_highest |=
290 				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
291 }
292 
293 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
294 {
295 	if (phy->cap.has_2ghz)
296 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
297 	if (phy->cap.has_5ghz)
298 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
299 	if (phy->cap.has_6ghz)
300 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
301 }
302 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303 
/* Populate a supported-band structure: duplicate the const channel
 * template (devm-managed, so regulatory code may modify flags), allocate
 * parallel per-channel state, attach the bitrate table, and optionally
 * fill in the common HT/VHT capabilities.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* Writable copy of the shared read-only channel template. */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	/* Per-channel survey/state storage, indexed like the channel list. */
	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Antenna-count dependent bits (STBC, MCS masks/maps). */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360 
361 static int
362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369 			       n_rates, true, false);
370 }
371 
372 static int
373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates, bool vht)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380 			       n_rates, true, vht);
381 }
382 
383 static int
384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 		   int n_rates)
386 {
387 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388 
389 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391 			       n_rates, false, false);
392 }
393 
394 static void
395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 		 enum nl80211_band band)
397 {
398 	struct ieee80211_supported_band *sband = &msband->sband;
399 	bool found = false;
400 	int i;
401 
402 	if (!sband)
403 		return;
404 
405 	for (i = 0; i < sband->n_channels; i++) {
406 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 			continue;
408 
409 		found = true;
410 		break;
411 	}
412 
413 	if (found) {
414 		phy->chandef.chan = &sband->channels[0];
415 		phy->chan_state = &msband->chan[0];
416 		return;
417 	}
418 
419 	sband->n_channels = 0;
420 	phy->hw->wiphy->bands[band] = NULL;
421 }
422 
/* Common wiphy/hw setup shared by the primary PHY (mt76_register_device)
 * and extra PHYs (mt76_register_phy): feature and capability flags,
 * antenna masks, SAR capabilities plus per-range power storage, and the
 * default TX fragment limit.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* Advertise SAR support and allocate one power slot per range. */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	/* Let drivers override before init; default to 16 fragments. */
	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* Software A-MSDU aggregation only when the driver does not
	 * offload it and fragment lists are actually usable.
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
482 
/* Allocate an additional (non-primary) PHY for a multi-band device.
 *
 * The ieee80211_hw private area is laid out as:
 *   [struct mt76_phy (8-byte aligned)][driver private data of 'size']
 * so phy->priv points just past the aligned mt76_phy.
 *
 * Returns the new PHY, or NULL on allocation failure. The caller is
 * expected to register it later via mt76_register_phy().
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	/* Driver private data follows the aligned mt76_phy. */
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
516 
/* Initialize and register an extra PHY (allocated by mt76_alloc_phy())
 * with mac80211: common hw setup, per-band init, optional LED, DT/regdom
 * channel pruning, then ieee80211_register_hw().
 *
 * 'rates' is the legacy bitrate table; 5/6 GHz bands skip the first 4
 * entries (the CCK rates, see mt76_rates).
 *
 * Returns 0 on success or a negative errno.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* Skip the 4 CCK rates - OFDM only on 5 GHz. */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		/* Skip the 4 CCK rates - OFDM only on 6 GHz. */
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop bands left without any
	 * enabled channel.
	 */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
565 
/* Tear down an extra PHY registered with mt76_register_phy(). Safe to
 * call on a PHY that never completed registration (checked via the
 * MT76_STATE_REGISTERED bit). Teardown order mirrors registration:
 * LEDs, pending TX status, then mac80211 unregistration.
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* Flush all pending TX status entries before unregistering. */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
580 
/* Create a page pool for a queue. Pools are only needed for RX queues
 * and the WED TX-free queue; for other queues this is a no-op.
 *
 * Main/band RX queues get a larger pool (256 pages) than the rest (16).
 * On MMIO devices the pool also handles DMA mapping/sync and, for real
 * RX queues, is tied to the queue's NAPI context for page recycling.
 *
 * Returns 0 on success or the page_pool_create() error code.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* RX queue index within dev->q_rx, or -1 for non-RX queues. */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* Clear the error pointer so later cleanup can test NULL. */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
629 
/* Allocate and initialize the core mt76 device together with its primary
 * PHY (band 0). Sets up locks, wait queues, MCU message queue, token
 * IDRs, per-queue RX skb lists, the TX worker hook and the ordered
 * workqueue. No hardware access happens here.
 *
 * 'size' is the driver's private data size appended to ieee80211_hw.
 * Returns the device or NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* Drivers may repoint this at a different DMA-capable device. */
	dev->dma_dev = pdev;

	/* The primary PHY is embedded in the device and owns band 0. */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
706 
/* Register the primary PHY/device with mac80211: common hw setup,
 * per-band init (5/6 GHz skip the 4 CCK rates), DT/regdom channel
 * pruning, optional LED, ieee80211_register_hw() and finally the TX
 * worker thread.
 *
 * Returns 0 on success or a negative errno.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* Skip the 4 CCK rates - OFDM only on 5 GHz. */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		/* Skip the 4 CCK rates - OFDM only on 6 GHz. */
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop fully-disabled bands. */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
760 
/* Unregister the primary PHY/device from mac80211. Safe to call when
 * registration never completed. Teardown order matters: LEDs, pending
 * TX status, wcid cleanup, then ieee80211_unregister_hw().
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* Flush all pending TX status entries before unregistering. */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
775 
776 void mt76_free_device(struct mt76_dev *dev)
777 {
778 	mt76_worker_teardown(&dev->tx_worker);
779 	if (dev->wq) {
780 		destroy_workqueue(dev->wq);
781 		dev->wq = NULL;
782 	}
783 	ieee80211_free_hw(dev->hw);
784 }
785 EXPORT_SYMBOL_GPL(mt76_free_device);
786 
/* Finish the A-MSDU currently being assembled on queue q: detach it from
 * the phy's assembly slot, sanity-check the first subframe and either
 * drop the whole aggregate or hand it to the per-queue RX skb list for
 * later processing.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* Skip the 8-byte IV if it is still in the frame. */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be: spoofed A-MSDU - drop. */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
822 
/* Collect RX frames into A-MSDU bursts. Subframes sharing a sequence
 * number are chained onto the head skb's frag_list; a completed (or
 * non-A-MSDU) frame is released via mt76_rx_release_amsdu().
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* A pending aggregate is flushed when this frame cannot belong to
	 * it: not an A-MSDU, explicitly a first subframe, or a different
	 * sequence number.
	 */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new aggregate; tail points at the frag_list slot. */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* Append and advance the tail link. */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
845 
/* Driver entry point for received frames. Drops frames while the PHY is
 * not running, updates testmode RX counters when enabled, and feeds the
 * frame into the A-MSDU burst assembly for queue q.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
867 
868 bool mt76_has_tx_pending(struct mt76_phy *phy)
869 {
870 	struct mt76_queue *q;
871 	int i;
872 
873 	for (i = 0; i < __MT_TXQ_MAX; i++) {
874 		q = phy->q_tx[i];
875 		if (q && q->queued)
876 			return true;
877 	}
878 
879 	return false;
880 }
881 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
882 
883 static struct mt76_channel_state *
884 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
885 {
886 	struct mt76_sband *msband;
887 	int idx;
888 
889 	if (c->band == NL80211_BAND_2GHZ)
890 		msband = &phy->sband_2g;
891 	else if (c->band == NL80211_BAND_6GHZ)
892 		msband = &phy->sband_6g;
893 	else
894 		msband = &phy->sband_5g;
895 
896 	idx = c - &msband->sband.channels[0];
897 	return &msband->chan[idx];
898 }
899 
900 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
901 {
902 	struct mt76_channel_state *state = phy->chan_state;
903 
904 	state->cc_active += ktime_to_us(ktime_sub(time,
905 						  phy->survey_time));
906 	phy->survey_time = time;
907 }
908 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
909 
/* Refresh survey statistics for the current channel: let the driver pull
 * hardware counters, account the active-time interval, and - for drivers
 * doing software RX airtime accounting - fold the accumulated BSS RX time
 * into the channel state under cc_lock.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock protects the cur_cc_bss_rx accumulator. */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
931 
/* Switch the PHY to the channel currently configured in hw->conf: wait
 * (bounded) for pending TX to drain, close out survey accounting on the
 * old channel, reset DFS state on a real frequency/width change and
 * update the chandef/channel-state pointers. Off-channel visits do not
 * update main_chan, and their state is zeroed so scan dwell time does not
 * pollute the operating channel's statistics.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	/* Give TX up to 200ms to drain before switching. */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* Any frequency or width change invalidates the DFS CAC state. */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
957 
/* mac80211 get_survey callback. The flat index walks bands in the order
 * 2 GHz, 5 GHz, 6 GHz; idx 0 also triggers a counter refresh. Fills in
 * per-channel busy/rx/tx/active times (converted from us to ms) and noise.
 *
 * Returns 0 on success or -ENOENT once idx runs past the last channel.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* Refresh hardware counters once per survey dump. */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* Translate the flat index into (band, channel). */
	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* Channel state counters are in us; survey wants ms. */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_lock protects the software-accounted RX/TX counters. */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1022 
/* Enable software CCMP PN replay checking for a station entry and seed
 * the expected PNs from mac80211's key state. Only CCMP keys are
 * checked; any other cipher (or a NULL key) disables the check.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	/* Intentional post-loop use of i: slot IEEE80211_NUM_TIDS holds the
	 * management-frame PN (assumes rx_key_pn has NUM_TIDS + 1 entries -
	 * TODO confirm against the struct definition in mt76.h).
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1051 
/* Combine per-chain RSSI values (dBm) into one signal estimate.
 *
 * Starting from -128 (the "no signal" floor), each valid chain (bit set
 * in chain_mask, value <= 0) either becomes the new maximum or boosts the
 * running value by 1-3 dB depending on how close it is to the current
 * maximum - approximating the power sum of multiple receive chains.
 * A return of -128 means no chain contributed a usable value.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int signal = -128;
	u8 chains;

	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
		int cur, diff;

		cur = *chain_signal;
		/* Skip chains not in the mask or with invalid (>0) RSSI. */
		if (!(chains & BIT(0)) ||
		    cur > 0)
			continue;

		/* Keep the stronger value in 'signal', weaker in 'cur'. */
		if (cur > signal)
			swap(cur, signal);

		/* Boost by ~the dB gain of adding a comparable chain. */
		diff = signal - cur;
		if (diff == 0)
			signal += 3;
		else if (diff <= 2)
			signal += 2;
		else if (diff <= 6)
			signal += 1;
	}

	return signal;
}
EXPORT_SYMBOL(mt76_rx_signal);
1080 
/* Convert the driver-private mt76_rx_status stored in skb->cb into the
 * mac80211 ieee80211_rx_status (which shares the same cb area, hence the
 * local copy first), and resolve the owning hw and station for the frame.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	/* Copy out first: status and mstat alias the same skb->cb bytes. */
	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	/* EHT and HE rate details live in different status fields. */
	if (status->encoding == RX_ENC_EHT) {
		status->eht.ru = mstat.eht.ru;
		status->eht.gi = mstat.eht.gi;
	} else {
		status->he_ru = mstat.he_ru;
		status->he_gi = mstat.he_gi;
		status->he_dcm = mstat.he_dcm;
	}
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;
	/* Recompute the combined signal from the per-chain values. */
	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
	if (status->signal <= -128)
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	/* Propagate MLO link info when a station entry is attached. */
	if (mstat.wcid) {
		status->link_valid = mstat.wcid->link_valid;
		status->link_id = mstat.wcid->link_id;
	}

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.phy_idx);
}
1136 
/* CCMP/GCMP PN replay detection for hardware-decrypted frames.
 * Frames whose PN does not strictly increase are demoted to
 * monitor-only delivery (RX_FLAG_ONLY_MONITOR) instead of being
 * passed up the normal RX path.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	/* only hardware-decrypted frames carry a PN to validate */
	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	/* frame is already marked for monitor-only delivery */
	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* one replay counter per TID by default */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		/* PN did not increase: suspected replay, demote to monitor */
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	/* record the highest PN seen for this replay counter */
	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1194 
1195 static void
1196 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1197 		    int len)
1198 {
1199 	struct mt76_wcid *wcid = status->wcid;
1200 	struct ieee80211_rx_status info = {
1201 		.enc_flags = status->enc_flags,
1202 		.rate_idx = status->rate_idx,
1203 		.encoding = status->encoding,
1204 		.band = status->band,
1205 		.nss = status->nss,
1206 		.bw = status->bw,
1207 	};
1208 	struct ieee80211_sta *sta;
1209 	u32 airtime;
1210 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1211 
1212 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1213 	spin_lock(&dev->cc_lock);
1214 	dev->cur_cc_bss_rx += airtime;
1215 	spin_unlock(&dev->cc_lock);
1216 
1217 	if (!wcid || !wcid->sta)
1218 		return;
1219 
1220 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1221 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1222 }
1223 
1224 static void
1225 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1226 {
1227 	struct mt76_wcid *wcid;
1228 	int wcid_idx;
1229 
1230 	if (!dev->rx_ampdu_len)
1231 		return;
1232 
1233 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1234 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1235 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1236 	else
1237 		wcid = NULL;
1238 	dev->rx_ampdu_status.wcid = wcid;
1239 
1240 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1241 
1242 	dev->rx_ampdu_len = 0;
1243 	dev->rx_ampdu_ref = 0;
1244 }
1245 
/* Software RX airtime accounting entry point. Frames that are part of an
 * A-MPDU are accumulated and reported once per aggregate; standalone
 * frames are reported immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	/* only when the hardware cannot report RX airtime itself */
	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* unknown sender: only count frames addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a new aggregate (or a non-aggregated frame) closes the previous one */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* first subframe: snapshot status for the whole A-MPDU */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1285 
/* Per-frame station bookkeeping on the RX path: resolve the wcid for
 * PS-Poll frames, feed airtime accounting, track RSSI, and drive
 * powersave state transitions for stations with software PS tracking.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll frames may arrive without a resolved wcid; look up the
	 * station by transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* track average RSSI (stored as a positive magnitude) */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames carry a reliable PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no change in powersave state */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* set the flag before notifying the driver, clear it after, so the
	 * driver callback always sees a consistent PS state
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1353 
/* Hand a batch of fully-processed RX frames to mac80211. A-MSDU
 * subframes are chained on frag_list and delivered individually.
 * When called from NAPI context, frames go through GRO; otherwise they
 * are delivered via the plain netif receive path.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		/* detach the A-MSDU chain before handing skb to mac80211 */
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1393 
1394 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1395 			   struct napi_struct *napi)
1396 {
1397 	struct sk_buff_head frames;
1398 	struct sk_buff *skb;
1399 
1400 	__skb_queue_head_init(&frames);
1401 
1402 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1403 		mt76_check_sta(dev, skb);
1404 		if (mtk_wed_device_active(&dev->mmio.wed))
1405 			__skb_queue_tail(&frames, skb);
1406 		else
1407 			mt76_rx_aggr_reorder(skb, &frames);
1408 	}
1409 
1410 	mt76_rx_complete(dev, &frames, napi);
1411 }
1412 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1413 
/* Add a station: call into the driver, wire up the per-TXQ wcid index,
 * initialize RSSI tracking and publish the wcid via RCU.
 * Returns 0 on success or the driver's error code.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* point every allocated TXQ at this station's wcid */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	if (phy->band_idx == MT_BAND1)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->phy_idx = phy->band_idx;
	/* publish last, after the wcid is fully set up */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_wcid_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1451 
1452 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1453 		       struct ieee80211_sta *sta)
1454 {
1455 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1456 	int i, idx = wcid->idx;
1457 
1458 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1459 		mt76_rx_aggr_stop(dev, wcid, i);
1460 
1461 	if (dev->drv->sta_remove)
1462 		dev->drv->sta_remove(dev, vif, sta);
1463 
1464 	mt76_wcid_cleanup(dev, wcid);
1465 
1466 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1467 	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1468 }
1469 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1470 
/* Locked wrapper around __mt76_sta_remove(). */
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}
1479 
1480 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1481 		   struct ieee80211_sta *sta,
1482 		   enum ieee80211_sta_state old_state,
1483 		   enum ieee80211_sta_state new_state)
1484 {
1485 	struct mt76_phy *phy = hw->priv;
1486 	struct mt76_dev *dev = phy->dev;
1487 
1488 	if (old_state == IEEE80211_STA_NOTEXIST &&
1489 	    new_state == IEEE80211_STA_NONE)
1490 		return mt76_sta_add(phy, vif, sta);
1491 
1492 	if (old_state == IEEE80211_STA_AUTH &&
1493 	    new_state == IEEE80211_STA_ASSOC &&
1494 	    dev->drv->sta_assoc)
1495 		dev->drv->sta_assoc(dev, vif, sta);
1496 
1497 	if (old_state == IEEE80211_STA_NONE &&
1498 	    new_state == IEEE80211_STA_NOTEXIST)
1499 		mt76_sta_remove(dev, vif, sta);
1500 
1501 	return 0;
1502 }
1503 EXPORT_SYMBOL_GPL(mt76_sta_state);
1504 
/* Unpublish the station's wcid before mac80211 starts RCU removal, so no
 * new references can be taken. status_lock is held to synchronize with
 * the TX status path that dereferences dev->wcid[].
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1519 
/* Initialize the per-station wcid bookkeeping structures: pending-TX
 * queue/list and the packet-id IDR used for TX status matching.
 */
void mt76_wcid_init(struct mt76_wcid *wcid)
{
	INIT_LIST_HEAD(&wcid->tx_list);
	skb_queue_head_init(&wcid->tx_pending);

	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1529 
/* Release all TX state attached to a wcid: reclaim frames awaiting TX
 * status, destroy the packet-id IDR, detach from the phy's TX scheduling
 * list and free any frames still pending transmission.
 * Note the lock order: phy->tx_lock outside, tx_pending.lock inside.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = dev->phys[wcid->phy_idx];
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* collect all frames still waiting for TX status (-1 = any pktid) */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* move any not-yet-transmitted frames onto the free list */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1560 
1561 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1562 		     int *dbm)
1563 {
1564 	struct mt76_phy *phy = hw->priv;
1565 	int n_chains = hweight16(phy->chainmask);
1566 	int delta = mt76_tx_power_nss_delta(n_chains);
1567 
1568 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1569 
1570 	return 0;
1571 }
1572 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1573 
/* Apply SAR (Specific Absorption Rate) power limits from cfg80211.
 * Stores the per-frequency-range limit in phy->frp[] for later lookup by
 * mt76_get_sar_power(). Returns 0 on success, -EINVAL on bad input.
 */
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies power limitation in 0.25dbm; convert to 0.5dbm */
		s32 power = sar->sub_specs[i].power >> 1;

		/* clamp to the range representable in phy->frp */
		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1599 
1600 int mt76_get_sar_power(struct mt76_phy *phy,
1601 		       struct ieee80211_channel *chan,
1602 		       int power)
1603 {
1604 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1605 	int freq, i;
1606 
1607 	if (!capa || !phy->frp)
1608 		return power;
1609 
1610 	if (power > 127 || power < -127)
1611 		power = 127;
1612 
1613 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1614 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1615 		if (phy->frp[i].range &&
1616 		    freq >= phy->frp[i].range->start_freq &&
1617 		    freq < phy->frp[i].range->end_freq) {
1618 			power = min_t(int, phy->frp[i].power, power);
1619 			break;
1620 		}
1621 	}
1622 
1623 	return power;
1624 }
1625 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1626 
1627 static void
1628 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1629 {
1630 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1631 		ieee80211_csa_finish(vif, 0);
1632 }
1633 
1634 void mt76_csa_finish(struct mt76_dev *dev)
1635 {
1636 	if (!dev->csa_complete)
1637 		return;
1638 
1639 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1640 		IEEE80211_IFACE_ITER_RESUME_ALL,
1641 		__mt76_csa_finish, dev);
1642 
1643 	dev->csa_complete = 0;
1644 }
1645 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1646 
1647 static void
1648 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1649 {
1650 	struct mt76_dev *dev = priv;
1651 
1652 	if (!vif->bss_conf.csa_active)
1653 		return;
1654 
1655 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1656 }
1657 
/* Poll all active interfaces for a finished CSA beacon countdown;
 * sets dev->csa_complete via __mt76_csa_check().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1665 
/* mac80211 set_tim callback stub: TIM updates are handled elsewhere by
 * the drivers; nothing to do here.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1672 
/* Re-insert an 8-byte CCMP header (stripped by hardware) after the
 * 802.11 header, rebuilding it from the PN saved in status->iv, and
 * clear RX_FLAG_IV_STRIPPED so mac80211 parses it again.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* open an 8-byte gap right after the 802.11 header */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0, PN1, rsvd, key-id/ext-IV, PN2..PN5 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* ext IV bit + key id */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1695 
1696 int mt76_get_rate(struct mt76_dev *dev,
1697 		  struct ieee80211_supported_band *sband,
1698 		  int idx, bool cck)
1699 {
1700 	int i, offset = 0, len = sband->n_bitrates;
1701 
1702 	if (cck) {
1703 		if (sband != &dev->phy.sband_2g.sband)
1704 			return 0;
1705 
1706 		idx &= ~BIT(2); /* short preamble */
1707 	} else if (sband == &dev->phy.sband_2g.sband) {
1708 		offset = 4;
1709 	}
1710 
1711 	for (i = offset; i < len; i++) {
1712 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1713 			return i;
1714 	}
1715 
1716 	return 0;
1717 }
1718 EXPORT_SYMBOL_GPL(mt76_get_rate);
1719 
/* mac80211 sw_scan_start callback: flag the phy as scanning so other
 * paths (e.g. DFS state) can adapt.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1728 
/* mac80211 sw_scan_complete callback: clear the scanning flag. */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1736 
/* mac80211 get_antenna callback: report the phy's antenna mask for both
 * TX and RX (mt76 does not support asymmetric antenna configuration here).
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1750 
1751 struct mt76_queue *
1752 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1753 		int ring_base, void *wed, u32 flags)
1754 {
1755 	struct mt76_queue *hwq;
1756 	int err;
1757 
1758 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1759 	if (!hwq)
1760 		return ERR_PTR(-ENOMEM);
1761 
1762 	hwq->flags = flags;
1763 	hwq->wed = wed;
1764 
1765 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1766 	if (err < 0)
1767 		return ERR_PTR(err);
1768 
1769 	return hwq;
1770 }
1771 EXPORT_SYMBOL_GPL(mt76_init_queue);
1772 
1773 u16 mt76_calculate_default_rate(struct mt76_phy *phy,
1774 				struct ieee80211_vif *vif, int rateidx)
1775 {
1776 	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
1777 	struct cfg80211_chan_def *chandef = mvif->ctx ?
1778 					    &mvif->ctx->def :
1779 					    &phy->chandef;
1780 	int offset = 0;
1781 
1782 	if (chandef->chan->band != NL80211_BAND_2GHZ)
1783 		offset = 4;
1784 
1785 	/* pick the lowest rate for hidden nodes */
1786 	if (rateidx < 0)
1787 		rateidx = 0;
1788 
1789 	rateidx += offset;
1790 	if (rateidx >= ARRAY_SIZE(mt76_rates))
1791 		rateidx = offset;
1792 
1793 	return mt76_rates[rateidx].hw_value;
1794 }
1795 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1796 
/* Accumulate one station's TX statistics into the ethtool data array.
 * The layout (and therefore the order of the increments below) must
 * match the driver's ethtool stat string table; ei tracks the running
 * index starting from wi->initial_stat_idx.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* the last tx_bw bucket only exists for EHT-capable devices */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 14 MCS entries with EHT, 12 otherwise */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	/* record how many slots this worker consumed */
	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1832 
/* Append aggregated page-pool statistics from all RX queues to the
 * ethtool data array and advance *index accordingly. No-op when the
 * kernel is built without CONFIG_PAGE_POOL_STATS.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1847 
1848 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1849 {
1850 	struct ieee80211_hw *hw = phy->hw;
1851 	struct mt76_dev *dev = phy->dev;
1852 
1853 	if (dev->region == NL80211_DFS_UNSET ||
1854 	    test_bit(MT76_SCANNING, &phy->state))
1855 		return MT_DFS_STATE_DISABLED;
1856 
1857 	if (!hw->conf.radar_enabled) {
1858 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1859 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1860 			return MT_DFS_STATE_ACTIVE;
1861 
1862 		return MT_DFS_STATE_DISABLED;
1863 	}
1864 
1865 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1866 		return MT_DFS_STATE_CAC;
1867 
1868 	return MT_DFS_STATE_ACTIVE;
1869 }
1870 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1871