xref: /freebsd/sys/contrib/dev/mediatek/mt76/mac80211.c (revision b1bebaaba9b9c0ddfe503c43ca8e9e3917ee2c57)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #if defined(CONFIG_OF)
7 #include <linux/of.h>
8 #endif
9 #if defined(__FreeBSD__)
10 #include <linux/math64.h>
11 #include <linux/numa.h>
12 #endif
13 #include "mt76.h"
14 
/*
 * Channel table entry builders: each expands to an ieee80211_channel
 * initializer for the given hardware channel index and centre frequency
 * (MHz).  max_power is a generic 30 dBm cap; the regulatory core lowers
 * it at runtime as required.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
35 
/* 2.4 GHz band: channels 1-14 (2412-2484 MHz). */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
52 
/* 5 GHz band: channels 36-177; blank lines separate the U-NII sub-band
 * groups (36-48, 52-64, 100-144, 149-177).
 */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
86 
/* 6 GHz band: channels 1-233 (5955-7115 MHz), grouped by U-NII range. */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
152 
#if defined(CONFIG_MT76_LEDS)
/* Throughput thresholds mapped to LED blink intervals (ms) for the
 * mac80211 throughput LED trigger: higher throughput => faster blink.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
#endif
167 
/* Default legacy rate table shared by mt76 drivers: four CCK (11b) rates
 * followed by eight OFDM (11a/g) rates.  Callers rely on this layout when
 * they pass "rates + 4" for the 5/6 GHz bands (CCK is 2.4 GHz only).
 * The CCK_RATE/OFDM_RATE macros come from mt76.h; the second argument is
 * presumably the bitrate in 100 kbit/s units — confirm against mt76.h.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
183 
/* Frequency ranges (MHz) over which per-range SAR power limits may be
 * configured from userspace; covers 2.4, 5 and 6 GHz.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
197 
/* SAR capability advertised via wiphy->sar_capa in mt76_phy_init(). */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
203 
#if defined(CONFIG_MT76_LEDS)
/*
 * mt76_led_init - register the phy's LED with the LED class subsystem
 * @phy: phy whose LED descriptor (phy->leds) is to be registered
 *
 * Honours an optional "led" device-tree child node: a disabled node
 * suppresses registration entirely; "led-sources"/"led-active-low"
 * properties override pin and polarity (primary phy only).
 *
 * Returns 0 on success or when there is nothing to register, a negative
 * errno from led_classdev_register() otherwise.
 */
static int mt76_led_init(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct device_node *np = dev->dev->of_node;

	/* Nothing to do unless the driver supplied LED callbacks. */
	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return 0;

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_device_is_available(np)) {
			of_node_put(np);
			dev_info(dev->dev,
				"led registration was explicitly disabled by dts\n");
			return 0;
		}

		/* Pin/polarity overrides only apply to the primary phy. */
		if (phy == &dev->phy) {
			/* NOTE(review): of_property_read_u32() expects a
			 * u32 *; led_pin is int — works where the two have
			 * the same representation, but worth tidying.
			 */
			int led_pin;

			if (!of_property_read_u32(np, "led-sources", &led_pin))
				phy->leds.pin = led_pin;

			phy->leds.al =
				of_property_read_bool(np, "led-active-low");
		}

		of_node_put(np);
	}

	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
		 wiphy_name(hw->wiphy));

	phy->leds.cdev.name = phy->leds.name;
	/* Blink faster with throughput, per the mt76_tpt_blink table. */
	phy->leds.cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	dev_info(dev->dev,
		"registering led '%s'\n", phy->leds.name);

	return led_classdev_register(dev->dev, &phy->leds.cdev);
}
251 
/* Unregister the LED class device registered by mt76_led_init(), if any. */
static void mt76_led_cleanup(struct mt76_phy *phy)
{
	/* mt76_led_init() bailed out early under the same condition. */
	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return;

	led_classdev_unregister(&phy->leds.cdev);
}
#endif
260 
/*
 * mt76_init_stream_cap - derive HT/VHT spatial-stream caps from antennas
 * @phy:   phy providing the antenna mask
 * @sband: band whose ht_cap/vht_cap are updated in place
 * @vht:   also update the VHT capabilities
 *
 * The stream count is the number of bits set in phy->antenna_mask.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	struct ieee80211_sta_vht_cap *vht_cap;
	int streams = hweight8(phy->antenna_mask);
	u16 mcs_map = 0;
	int i;

	/* TX STBC requires at least two spatial streams. */
	if (streams > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* One full HT MCS octet (8 rates) per supported stream. */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = (i < streams) ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (streams > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* MCS 0-9 on each active stream, "not supported" on the rest;
	 * two bits per stream in the VHT MCS map.
	 */
	for (i = 0; i < 8; i++) {
		u16 mcs = (i < streams) ? IEEE80211_VHT_MCS_SUPPORT_0_9
					: IEEE80211_VHT_MCS_NOT_SUPPORTED;

		mcs_map |= mcs << (i * 2);
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
302 
/*
 * mt76_set_stream_caps - refresh stream capabilities on all present bands
 * @phy: phy whose supported bands get updated
 * @vht: advertise VHT capabilities on the 5/6 GHz bands
 *
 * The 2.4 GHz band is always configured HT-only.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
313 
/*
 * mt76_init_sband - populate one ieee80211_supported_band for a phy
 * @phy:     owning phy
 * @msband:  mt76 wrapper holding the sband plus per-channel state
 * @chan:    template channel table (copied, so the caller's may be const)
 * @n_chan:  number of channels in @chan
 * @rates:   legacy bitrate table (referenced, not copied)
 * @n_rates: number of entries in @rates
 * @ht:      advertise HT capabilities
 * @vht:     advertise VHT capabilities (implies @ht handling ran)
 *
 * Allocations are device-managed, so no explicit cleanup path is needed.
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* Channels are copied so the regulatory core can modify flags. */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Stream-dependent bits (STBC, MCS maps) come from the antenna mask. */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
370 
/* Wire up and populate the 2.4 GHz band: full rate table, HT, no VHT. */
static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;

	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz), rates,
			       n_rates, true, false);
}
381 
/* Wire up and populate the 5 GHz band: HT always, VHT per @vht. */
static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;

	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz), rates,
			       n_rates, true, vht);
}
392 
/* Wire up and populate the 6 GHz band: no HT/VHT (6 GHz uses HE/EHT). */
static int
mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;

	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
			       ARRAY_SIZE(mt76_channels_6ghz), rates,
			       n_rates, false, false);
}
403 
404 static void
mt76_check_sband(struct mt76_phy * phy,struct mt76_sband * msband,enum nl80211_band band)405 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
406 		 enum nl80211_band band)
407 {
408 	struct ieee80211_supported_band *sband = &msband->sband;
409 	bool found = false;
410 	int i;
411 
412 	if (!sband)
413 		return;
414 
415 	for (i = 0; i < sband->n_channels; i++) {
416 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
417 			continue;
418 
419 		found = true;
420 		break;
421 	}
422 
423 	if (found) {
424 		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
425 					NL80211_CHAN_HT20);
426 		phy->chan_state = &msband->chan[0];
427 		phy->dev->band_phys[band] = phy;
428 		return;
429 	}
430 
431 	sband->n_channels = 0;
432 	if (phy->hw->wiphy->bands[band] == sband)
433 		phy->hw->wiphy->bands[band] = NULL;
434 }
435 
/*
 * mt76_phy_init - common per-phy and per-hw setup before registration
 * @phy: phy being initialised
 * @hw:  ieee80211_hw the phy is attached to
 *
 * Per-phy state is always initialised; the wiphy/hw-wide configuration
 * below the early return runs only for the phy embedded in hw->priv
 * (radio phys allocated by mt76_alloc_radio_phy() share the primary
 * phy's hw and must not reconfigure it).
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	/* Secondary radio phys share the primary hw; stop here. */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* Only fill in defaults the driver has not set already. */
	if (!wiphy->available_antennas_tx)
		wiphy->available_antennas_tx = phy->antenna_mask;
	if (!wiphy->available_antennas_rx)
		wiphy->available_antennas_rx = phy->antenna_mask;

	/* Per-range SAR limits, backed by a per-phy power table. */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* Software A-MSDU only makes sense without hardware offload and
	 * with room for more than one fragment.
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
501 
/*
 * mt76_alloc_radio_phy - allocate a secondary phy sharing the primary hw
 * @dev:      owning mt76 device
 * @size:     driver-private data size appended after the phy
 * @band_idx: band index this phy serves
 *
 * Unlike mt76_alloc_phy(), no new ieee80211_hw is created; the phy reuses
 * dev->phy.hw.  Memory is device-managed.  Returns the phy or NULL.
 */
struct mt76_phy *
mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
		     u8 band_idx)
{
	struct ieee80211_hw *hw = dev->phy.hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	/* Driver-private area starts 8-byte aligned after the phy. */
	phy_size = ALIGN(sizeof(*phy), 8);
	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
	if (!phy)
		return NULL;

	phy->dev = dev;
	phy->hw = hw;
#if defined(__linux__)
	phy->priv = (void *)phy + phy_size;
#elif defined(__FreeBSD__)
	/* Avoid arithmetic on void * (a GNU extension). */
	phy->priv = (u8 *)phy + phy_size;
#endif
	phy->band_idx = band_idx;

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
527 
/*
 * mt76_alloc_phy - allocate an extra phy with its own ieee80211_hw
 * @dev:      owning mt76 device
 * @size:     driver-private data size appended after the phy
 * @ops:      mac80211 callbacks for the new hw
 * @band_idx: band index this phy serves
 *
 * The phy lives inside the new hw's priv area, followed by the aligned
 * driver-private region.  The interface-mode setup mirrors the one done
 * in mt76_alloc_device() for the primary hw.  Returns the phy or NULL.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
#if defined(__linux__)
	phy->priv = hw->priv + phy_size;
#elif defined(__FreeBSD__)
	/* Avoid arithmetic on void * (a GNU extension). */
	phy->priv = (u8 *)hw->priv + phy_size;
#endif
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
565 
/*
 * mt76_register_phy - initialise bands and register a secondary phy
 * @phy:     phy to register
 * @vht:     advertise VHT on the 5/6 GHz bands
 * @rates:   legacy rate table; the first four entries are assumed to be
 *           CCK rates (as in mt76_rates), which the 5/6 GHz bands skip
 * @n_rates: number of entries in @rates
 *
 * Calls ieee80211_register_hw() only when this phy owns its hw (radio
 * phys share the already-registered primary hw).  Returns 0 or errno.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

#if defined(CONFIG_MT76_LEDS)
	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}
#endif

	/* Apply DT frequency limits, then drop bands left without
	 * usable channels.
	 */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* Only the phy embedded in hw->priv registers the hw itself. */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
618 
/*
 * mt76_unregister_phy - tear down a phy registered by mt76_register_phy
 * @phy: phy to unregister; no-op if it was never registered
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

#if defined(CONFIG_MT76_LEDS)
	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
#endif
	/* Flush outstanding TX status before the hw goes away. */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
635 
/*
 * mt76_create_page_pool - attach a page pool to an RX/WED-TX-free queue
 * @dev: owning mt76 device
 * @q:   queue to attach the pool to
 *
 * Queues that are neither RX nor WED TX-free get no pool (returns 0).
 * Main/band/NPU RX queues get a larger pool than auxiliary queues; on
 * MMIO devices the pool also performs DMA mapping and sync.
 *
 * Returns 0 on success or the page_pool_create() error code.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* Index into dev->q_rx for RX queues, -1 otherwise. */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
	case MT_RXQ_NPU0:
	case MT_RXQ_NPU1:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* Leave a clean NULL rather than an ERR_PTR in the queue. */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
686 
/*
 * mt76_alloc_device - allocate and initialise the primary mt76 device
 * @pdev:    backing struct device (also used as the DMA device)
 * @size:    total hw priv size requested by the driver (contains mt76_dev)
 * @ops:     mac80211 callbacks
 * @drv_ops: mt76 driver callbacks
 *
 * Sets up the embedded primary phy (band 0), all locks, wait queues,
 * token IDRs, list heads and the ordered workqueue.  Returns the device
 * or NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* Drivers may retarget dma_dev later; default to the bus device. */
	dev->dma_dev = pdev;

	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
764 
/*
 * mt76_register_device - initialise bands and register the primary phy
 * @dev:     device to register
 * @vht:     advertise VHT on the 5/6 GHz bands
 * @rates:   legacy rate table; the first four entries are assumed to be
 *           CCK rates (as in mt76_rates), skipped for 5/6 GHz
 * @n_rates: number of entries in @rates
 *
 * Mirrors mt76_register_phy() for the primary phy and additionally
 * starts the TX worker.  Returns 0 or errno.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop bands left without
	 * usable channels.
	 */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

#if defined(CONFIG_MT76_LEDS)
	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}
#endif

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
820 
/*
 * mt76_unregister_device - unregister the primary phy's hw
 * @dev: device to unregister; no-op if it was never registered
 *
 * Counterpart of mt76_register_device(); mt76_free_device() releases
 * the remaining resources afterwards.
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
#if defined(__linux__)
	struct ieee80211_hw *hw = dev->hw;
#endif

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

#if defined(CONFIG_MT76_LEDS)
	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
#endif
	/* Flush outstanding TX status and the global WCID first. */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
#if defined(__linux__)
	ieee80211_unregister_hw(hw);
#elif defined(__FreeBSD__)
	ieee80211_unregister_hw(dev->hw);
#endif
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
843 
/*
 * mt76_free_device - release resources after mt76_unregister_device
 * @dev: device to free; dev itself lives in hw->priv and is freed with hw
 */
void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	mt76_npu_deinit(dev);
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);
855 
/* Reset per-phy runtime state during full device recovery; tolerates
 * NULL so callers can iterate sparse phy arrays unconditionally.
 */
static void mt76_reset_phy(struct mt76_phy *phy)
{
	if (!phy)
		return;

	INIT_LIST_HEAD(&phy->tx_list);
	phy->num_sta = 0;
	phy->chanctx = NULL;
	mt76_roc_complete(phy);
}
866 
/*
 * mt76_reset_device - drop all station/vif state for firmware recovery
 * @dev: device being reset
 *
 * Clears every WCID entry, empties the station bookkeeping lists and
 * resets each phy.  Intended to be called while the hardware is being
 * re-initialised after a firmware crash.
 */
void mt76_reset_device(struct mt76_dev *dev)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[i]);
		if (!wcid)
			continue;

		wcid->sta = 0;
		mt76_wcid_cleanup(dev, wcid);
		rcu_assign_pointer(dev->wcid[i], NULL);
	}
	rcu_read_unlock();

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	dev->vif_mask = 0;
	memset(dev->wcid_mask, 0, sizeof(dev->wcid_mask));

	mt76_reset_phy(&dev->phy);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
		mt76_reset_phy(dev->phys[i]);
}
EXPORT_SYMBOL_GPL(mt76_reset_device);
895 
/*
 * mt76_vif_phy - resolve the phy currently serving a vif
 * @hw:  ieee80211_hw the vif belongs to
 * @vif: vif to resolve
 *
 * Single-radio devices always use the phy in hw->priv.  Multi-radio
 * devices derive the phy from the vif's assigned channel context;
 * returns NULL when no context is assigned yet.
 */
struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_chanctx *ctx;

	if (!hw->wiphy->n_radio)
		return hw->priv;

	if (!mlink->ctx)
		return NULL;

	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
	return ctx->phy;
}
EXPORT_SYMBOL_GPL(mt76_vif_phy);
912 
/*
 * mt76_rx_release_amsdu - finalise a buffered A-MSDU and queue it for RX
 * @phy: phy the frames arrived on
 * @q:   RX queue index the A-MSDU was collected from
 *
 * Detaches the accumulated head/tail from phy->rx_amsdu[q], validates
 * the first subframe, and either drops the whole chain or appends it to
 * dev->rx_skb[q] for further processing.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* Decrypted with the IV still present: skip it. */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be => malformed; drop. */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
948 
/*
 * mt76_rx_release_burst - collect A-MSDU subframes into a frag list
 * @phy: phy the frame arrived on
 * @q:   RX queue index
 * @skb: newly received (sub)frame
 *
 * Chains consecutive subframes of the same A-MSDU (matched by sequence
 * number) onto the head skb's frag_list; flushes the pending chain
 * whenever a frame that cannot belong to it arrives, and again once the
 * last subframe is seen.  Non-A-MSDU frames pass straight through.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* New frame does not continue the pending A-MSDU: flush it. */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new chain; tail points at the frag_list slot. */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
971 
/*
 * mt76_rx - driver entry point for a received frame
 * @dev: owning mt76 device
 * @q:   RX queue the frame came from
 * @skb: received frame; skb->cb holds the mt76_rx_status
 *
 * Drops the frame if the owning phy is not running, updates testmode RX
 * counters when enabled, then feeds the frame into A-MSDU reassembly.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
993 
mt76_has_tx_pending(struct mt76_phy * phy)994 bool mt76_has_tx_pending(struct mt76_phy *phy)
995 {
996 	struct mt76_queue *q;
997 	int i;
998 
999 	for (i = 0; i < __MT_TXQ_MAX; i++) {
1000 		q = phy->q_tx[i];
1001 		if (q && q->queued)
1002 			return true;
1003 	}
1004 
1005 	return false;
1006 }
1007 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
1008 
1009 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)1010 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
1011 {
1012 	struct mt76_sband *msband;
1013 	int idx;
1014 
1015 	if (c->band == NL80211_BAND_2GHZ)
1016 		msband = &phy->sband_2g;
1017 	else if (c->band == NL80211_BAND_6GHZ)
1018 		msband = &phy->sband_6g;
1019 	else
1020 		msband = &phy->sband_5g;
1021 
1022 	idx = c - &msband->sband.channels[0];
1023 	return &msband->chan[idx];
1024 }
1025 
/*
 * mt76_update_survey_active_time - account elapsed on-channel time
 * @phy:  phy whose current channel state is updated
 * @time: current timestamp; becomes the new survey reference point
 *
 * Adds the microseconds since the last update to cc_active.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;

	state->cc_active += ktime_to_us(ktime_sub(time,
						  phy->survey_time));
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
1035 
/*
 * mt76_update_survey - refresh channel survey counters for @phy
 * @phy: phy to update
 *
 * Lets the driver pull hardware counters first, then accounts active
 * time and, for drivers doing software RX airtime accounting, folds the
 * accumulated BSS RX airtime into the current channel state.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock guards cur_cc_bss_rx against the RX path. */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
1057 
/*
 * __mt76_set_channel - switch @phy to a new channel (caller holds locks)
 * @phy:        phy to retune
 * @chandef:    target channel definition
 * @offchannel: true for a temporary off-channel excursion (scan/ROC)
 *
 * Quiesces TX, flushes survey data, updates channel bookkeeping, then
 * hands off to the driver's set_channel callback and restarts TX.
 * Returns the driver's result.
 */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	set_bit(MT76_RESET, &phy->state);

	/* Stop the TX worker and give pending frames 200ms to drain. */
	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* Any frequency/width change invalidates the DFS CAC state. */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chandef = *chandef;

	/* Off-channel visits must not pollute the main channel's stats. */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
1093 
/* Public channel-switch entry point: stop the periodic MAC work and
 * perform the actual switch with dev->mutex held.
 */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int err;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	err = __mt76_set_channel(phy, chandef, offchannel);
	mutex_unlock(&dev->mutex);

	return err;
}
1108 
mt76_update_channel(struct mt76_phy * phy)1109 int mt76_update_channel(struct mt76_phy *phy)
1110 {
1111 	struct ieee80211_hw *hw = phy->hw;
1112 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1113 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1114 
1115 	phy->radar_enabled = hw->conf.radar_enabled;
1116 
1117 	return mt76_set_channel(phy, chandef, offchannel);
1118 }
1119 EXPORT_SYMBOL_GPL(mt76_update_channel);
1120 
1121 static struct mt76_sband *
mt76_get_survey_sband(struct mt76_phy * phy,int * idx)1122 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1123 {
1124 	if (*idx < phy->sband_2g.sband.n_channels)
1125 		return &phy->sband_2g;
1126 
1127 	*idx -= phy->sband_2g.sband.n_channels;
1128 	if (*idx < phy->sband_5g.sband.n_channels)
1129 		return &phy->sband_5g;
1130 
1131 	*idx -= phy->sband_5g.sband.n_channels;
1132 	if (*idx < phy->sband_6g.sband.n_channels)
1133 		return &phy->sband_6g;
1134 
1135 	*idx -= phy->sband_6g.sband.n_channels;
1136 	return NULL;
1137 }
1138 
/* mac80211 get_survey callback.  Walks all phys attached to @hw,
 * letting mt76_get_survey_sband() translate the flat @idx into a
 * (phy, band, channel) triple, then fills @survey from the cached
 * per-channel counters (stored in microseconds, reported in ms).
 * Returns -ENOENT once @idx runs past the last channel.
 */
mt76_get_survey(struct ieee80211_hw * hw,int idx,struct survey_info * survey)1139 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
1140 		    struct survey_info *survey)
1141 {
1142 	struct mt76_phy *phy = hw->priv;
1143 	struct mt76_dev *dev = phy->dev;
1144 	struct mt76_sband *sband = NULL;
1145 	struct ieee80211_channel *chan;
1146 	struct mt76_channel_state *state;
1147 	int phy_idx = 0;
1148 	int ret = 0;
1149 
1150 	mutex_lock(&dev->mutex);
1151 
1152 	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
1153 		sband = NULL;
1154 		phy = dev->phys[phy_idx];
1155 		if (!phy || phy->hw != hw)
1156 			continue;
1157 
		/* Note: mt76_get_survey_sband() adjusts idx in place. */
1158 		sband = mt76_get_survey_sband(phy, &idx);
1159 
		/* Refresh hardware counters once, on the first channel
		 * of each phy.
		 */
1160 		if (idx == 0 && phy->dev->drv->update_survey)
1161 			mt76_update_survey(phy);
1162 
		/* Without multi-radio support there is only one phy to
		 * consider.
		 */
1163 		if (sband || !hw->wiphy->n_radio)
1164 			break;
1165 	}
1166 
1167 	if (!sband) {
1168 		ret = -ENOENT;
1169 		goto out;
1170 	}
1171 
1172 	chan = &sband->sband.channels[idx];
1173 	state = mt76_channel_state(phy, chan);
1174 
1175 	memset(survey, 0, sizeof(*survey));
1176 	survey->channel = chan;
1177 	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
1178 	survey->filled |= dev->drv->survey_flags;
1179 	if (state->noise)
1180 		survey->filled |= SURVEY_INFO_NOISE_DBM;
1181 
1182 	if (chan == phy->main_chandef.chan) {
1183 		survey->filled |= SURVEY_INFO_IN_USE;
1184 
1185 		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
1186 			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
1187 	}
1188 
	/* Counters are kept in us; survey_info wants ms. */
1189 	survey->time_busy = div_u64(state->cc_busy, 1000);
1190 	survey->time_rx = div_u64(state->cc_rx, 1000);
1191 	survey->time = div_u64(state->cc_active, 1000);
1192 	survey->noise = state->noise;
1193 
	/* cc_bss_rx/cc_tx are updated under cc_lock elsewhere. */
1194 	spin_lock_bh(&dev->cc_lock);
1195 	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
1196 	survey->time_tx = div_u64(state->cc_tx, 1000);
1197 	spin_unlock_bh(&dev->cc_lock);
1198 
1199 out:
1200 	mutex_unlock(&dev->mutex);
1201 
1202 	return ret;
1203 }
1205 
/* Seed per-TID CCMP replay counters for @wcid from @key's current RX
 * sequence numbers, and enable software PN checking.  Only CCMP keys
 * are checked; any other cipher leaves rx_check_pn disabled.
 */
mt76_wcid_key_setup(struct mt76_dev * dev,struct mt76_wcid * wcid,struct ieee80211_key_conf * key)1206 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1207 			 struct ieee80211_key_conf *key)
1208 {
1209 	struct ieee80211_key_seq seq;
1210 	int i;
1211 
1212 	wcid->rx_check_pn = false;
1213 
1214 	if (!key)
1215 		return;
1216 
1217 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1218 		return;
1219 
1220 	wcid->rx_check_pn = true;
1221 
1222 	/* data frame */
1223 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1224 		ieee80211_get_key_rx_seq(key, i, &seq);
1225 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1226 	}
1227 
1228 	/* robust management frame */
	/* Note: i == IEEE80211_NUM_TIDS here after the loop, so the
	 * management-frame PN deliberately lands in the extra slot
	 * past the per-TID entries (see mt76_check_ccmp_pn()).
	 */
1229 	ieee80211_get_key_rx_seq(key, -1, &seq);
1230 	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1231 
1232 }
1233 EXPORT_SYMBOL(mt76_wcid_key_setup);
1234 
/* Combine per-chain RSSI readings (dBm, expected <= 0) into a single
 * signal estimate.  The strongest chain dominates and each additional
 * chain adds up to 3 dB depending on how close it is to the running
 * total.  Chains with a positive (invalid) reading are skipped.
 * Returns -128 when no chain reported a usable value.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int best = -128;
	int i;

	for (i = 0; chain_mask >> i; i++) {
		int cur = chain_signal[i];
		int delta;

		if (!((chain_mask >> i) & 1) || cur > 0)
			continue;

		/* Keep the strongest reading in 'best'. */
		if (cur > best) {
			int tmp = best;

			best = cur;
			cur = tmp;
		}

		delta = best - cur;
		if (delta == 0)
			best += 3;
		else if (delta <= 2)
			best += 2;
		else if (delta <= 6)
			best += 1;
	}

	return best;
}
1262 EXPORT_SYMBOL(mt76_rx_signal);
1263 
1264 static void
/* Convert the driver-private mt76_rx_status stashed in skb->cb into a
 * proper mac80211 ieee80211_rx_status (which aliases the same cb
 * area - hence the local copy first), and resolve the destination hw
 * and station for the frame.
 */
mt76_rx_convert(struct mt76_dev * dev,struct sk_buff * skb,struct ieee80211_hw ** hw,struct ieee80211_sta ** sta)1265 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1266 		struct ieee80211_hw **hw,
1267 		struct ieee80211_sta **sta)
1268 {
1269 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1270 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1271 	struct mt76_rx_status mstat;
1272 
	/* Copy out before memset: status and mstat share skb->cb. */
1273 	mstat = *((struct mt76_rx_status *)skb->cb);
1274 	memset(status, 0, sizeof(*status));
1275 
1276 	skb->priority = mstat.qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1277 
1278 	status->flag = mstat.flag;
1279 	status->freq = mstat.freq;
1280 	status->enc_flags = mstat.enc_flags;
1281 	status->encoding = mstat.encoding;
1282 	status->bw = mstat.bw;
	/* EHT and HE rate info occupy overlapping storage upstream. */
1283 	if (status->encoding == RX_ENC_EHT) {
1284 		status->eht.ru = mstat.eht.ru;
1285 		status->eht.gi = mstat.eht.gi;
1286 	} else {
1287 		status->he_ru = mstat.he_ru;
1288 		status->he_gi = mstat.he_gi;
1289 		status->he_dcm = mstat.he_dcm;
1290 	}
1291 	status->rate_idx = mstat.rate_idx;
1292 	status->nss = mstat.nss;
1293 	status->band = mstat.band;
1294 	status->signal = mstat.signal;
1295 	status->chains = mstat.chains;
1296 	status->ampdu_reference = mstat.ampdu_ref;
1297 	status->device_timestamp = mstat.timestamp;
1298 	status->mactime = mstat.timestamp;
	/* Recompute the combined signal from the per-chain readings;
	 * this overwrites the raw value assigned above.
	 */
1299 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1300 	if (status->signal <= -128)
1301 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1302 
1303 	if (ieee80211_is_beacon(hdr->frame_control) ||
1304 	    ieee80211_is_probe_resp(hdr->frame_control))
1305 		status->boottime_ns = ktime_get_boottime_ns();
1306 
1307 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1308 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1309 		     sizeof(mstat.chain_signal));
1310 	memcpy(status->chain_signal, mstat.chain_signal,
1311 	       sizeof(mstat.chain_signal));
1312 
1313 	if (mstat.wcid) {
1314 		status->link_valid = mstat.wcid->link_valid;
1315 		status->link_id = mstat.wcid->link_id;
1316 	}
1317 
1318 	*sta = wcid_to_sta(mstat.wcid);
1319 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1320 }
1321 
1322 static void
/* Software CCMP replay detection: compare the received PN against the
 * last PN seen for the frame's TID (or the management-frame slot) and
 * either mark the frame monitor-only (replay) or record the new PN.
 */
mt76_check_ccmp_pn(struct sk_buff * skb)1323 mt76_check_ccmp_pn(struct sk_buff *skb)
1324 {
1325 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1326 	struct mt76_wcid *wcid = status->wcid;
1327 	struct ieee80211_hdr *hdr;
1328 	int security_idx;
1329 	int ret;
1330 
	/* Only decrypted frames carry a PN worth checking. */
1331 	if (!(status->flag & RX_FLAG_DECRYPTED))
1332 		return;
1333 
1334 	if (status->flag & RX_FLAG_ONLY_MONITOR)
1335 		return;
1336 
1337 	if (!wcid || !wcid->rx_check_pn)
1338 		return;
1339 
1340 	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1341 	if (status->flag & RX_FLAG_8023)
1342 		goto skip_hdr_check;
1343 
1344 	hdr = mt76_skb_get_hdr(skb);
1345 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1346 		/*
1347 		 * Validate the first fragment both here and in mac80211
1348 		 * All further fragments will be validated by mac80211 only.
1349 		 */
1350 		if (ieee80211_is_frag(hdr) &&
1351 		    !ieee80211_is_first_frag(hdr->seq_ctrl))
1352 			return;
1353 	}
1354 
1355 	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1356 	 *
1357 	 * the recipient shall maintain a single replay counter for received
1358 	 * individually addressed robust Management frames that are received
1359 	 * with the To DS subfield equal to 0, [...]
1360 	 */
	/* IEEE80211_NUM_TIDS is the extra slot seeded for management
	 * frames in mt76_wcid_key_setup().
	 */
1361 	if (ieee80211_is_mgmt(hdr->frame_control) &&
1362 	    !ieee80211_has_tods(hdr->frame_control))
1363 		security_idx = IEEE80211_NUM_TIDS;
1364 
1365 skip_hdr_check:
1366 	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* Big-endian byte-wise compare: PN must strictly increase. */
1367 	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1368 		     sizeof(status->iv));
1369 	if (ret <= 0) {
		/* Replayed or stale PN - hide from the data path. */
1370 		status->flag |= RX_FLAG_ONLY_MONITOR;
1371 		return;
1372 	}
1373 
1374 	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1375 
1376 	if (status->flag & RX_FLAG_IV_STRIPPED)
1377 		status->flag |= RX_FLAG_PN_VALIDATED;
1378 }
1379 
1380 static void
mt76_airtime_report(struct mt76_dev * dev,struct mt76_rx_status * status,int len)1381 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1382 		    int len)
1383 {
1384 	struct mt76_wcid *wcid = status->wcid;
1385 	struct ieee80211_rx_status info = {
1386 		.enc_flags = status->enc_flags,
1387 		.rate_idx = status->rate_idx,
1388 		.encoding = status->encoding,
1389 		.band = status->band,
1390 		.nss = status->nss,
1391 		.bw = status->bw,
1392 	};
1393 	struct ieee80211_sta *sta;
1394 	u32 airtime;
1395 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1396 
1397 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1398 	spin_lock(&dev->cc_lock);
1399 	dev->cur_cc_bss_rx += airtime;
1400 	spin_unlock(&dev->cc_lock);
1401 
1402 	if (!wcid || !wcid->sta)
1403 		return;
1404 
1405 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1406 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1407 }
1408 
1409 static void
/* Report the airtime of the A-MPDU burst accumulated so far and reset
 * the accumulator.  The wcid is re-resolved from its index under RCU
 * because the station may have been removed since the burst started.
 */
mt76_airtime_flush_ampdu(struct mt76_dev * dev)1410 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1411 {
1412 	struct mt76_wcid *wcid;
1413 	int wcid_idx;
1414 
1415 	if (!dev->rx_ampdu_len)
1416 		return;
1417 
1418 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	/* 0xff (set in mt76_airtime_check) falls outside the table and
	 * yields a NULL wcid here.
	 */
1419 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1420 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1421 	else
1422 		wcid = NULL;
1423 	dev->rx_ampdu_status.wcid = wcid;
1424 
1425 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1426 
1427 	dev->rx_ampdu_len = 0;
1428 	dev->rx_ampdu_ref = 0;
1429 }
1430 
1431 static void
/* Software RX airtime accounting hook, called per frame.  Frames that
 * are part of an A-MPDU are accumulated and reported once per burst
 * (on burst change or via mt76_airtime_flush_ampdu()); standalone
 * frames are reported immediately.  Only active when the driver sets
 * MT_DRV_SW_RX_AIRTIME.
 */
mt76_airtime_check(struct mt76_dev * dev,struct sk_buff * skb)1432 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1433 {
1434 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1435 	struct mt76_wcid *wcid = status->wcid;
1436 
1437 	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1438 		return;
1439 
	/* Frames from unknown stations only count when addressed to us. */
1440 	if (!wcid || !wcid->sta) {
1441 		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1442 
1443 		if (status->flag & RX_FLAG_8023)
1444 			return;
1445 
1446 		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1447 			return;
1448 
1449 		wcid = NULL;
1450 	}
1451 
	/* A new burst (or a non-A-MPDU frame) closes the previous one. */
1452 	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1453 	    status->ampdu_ref != dev->rx_ampdu_ref)
1454 		mt76_airtime_flush_ampdu(dev);
1455 
1456 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1457 		if (!dev->rx_ampdu_len ||
1458 		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* Start accumulating a new burst; 0xff marks
			 * "no station" for the flush path.
			 */
1459 			dev->rx_ampdu_status = *status;
1460 			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1461 			dev->rx_ampdu_ref = status->ampdu_ref;
1462 		}
1463 
1464 		dev->rx_ampdu_len += skb->len;
1465 		return;
1466 	}
1467 
1468 	mt76_airtime_report(dev, status, skb->len);
1469 }
1470 
1471 static void
/* Per-frame RX station bookkeeping: resolve the wcid for PS-Poll
 * frames, run airtime accounting, track RSSI and activity, and mirror
 * the sender's power-save state into mac80211 when the driver asks
 * for software PS tracking (MT_WCID_FLAG_CHECK_PS).
 */
mt76_check_sta(struct mt76_dev * dev,struct sk_buff * skb)1472 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1473 {
1474 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1475 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1476 	struct ieee80211_sta *sta;
1477 	struct ieee80211_hw *hw;
1478 	struct mt76_wcid *wcid = status->wcid;
1479 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1480 	bool ps;
1481 
1482 	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll may arrive without a hardware wcid match; look the
	 * station up by transmitter address instead.
	 */
1483 	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1484 	    !(status->flag & RX_FLAG_8023)) {
1485 		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1486 		if (sta)
1487 			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1488 	}
1489 
1490 	mt76_airtime_check(dev, skb);
1491 
1492 	if (!wcid || !wcid->sta)
1493 		return;
1494 
1495 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1496 
	/* Track average RSSI; stored as a positive magnitude. */
1497 	if (status->signal <= 0)
1498 		ewma_signal_add(&wcid->rssi, -status->signal);
1499 
1500 	wcid->inactive_count = 0;
1501 
1502 	if (status->flag & RX_FLAG_8023)
1503 		return;
1504 
1505 	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1506 		return;
1507 
1508 	if (ieee80211_is_pspoll(hdr->frame_control)) {
1509 		ieee80211_sta_pspoll(sta);
1510 		return;
1511 	}
1512 
	/* Only complete mgmt/data frames carry a trustworthy PM bit. */
1513 	if (ieee80211_has_morefrags(hdr->frame_control) ||
1514 	    !(ieee80211_is_mgmt(hdr->frame_control) ||
1515 	      ieee80211_is_data(hdr->frame_control)))
1516 		return;
1517 
1518 	ps = ieee80211_has_pm(hdr->frame_control);
1519 
1520 	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1521 		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
1522 		ieee80211_sta_uapsd_trigger(sta, tidno);
1523 
	/* No state change - nothing more to do. */
1524 	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1525 		return;
1526 
	/* Set the flag before notifying the driver on entry, clear it
	 * after on exit, so the driver always sees a consistent state.
	 */
1527 	if (ps)
1528 		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1529 
1530 	if (dev->drv->sta_ps)
1531 		dev->drv->sta_ps(dev, sta, ps);
1532 
1533 	if (!ps)
1534 		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1535 
1536 	ieee80211_sta_ps_transition(sta, ps);
1537 }
1538 
/* Hand a batch of fully reordered RX frames to mac80211.  Each skb
 * may carry additional A-MSDU subframes in its frag_list, which are
 * detached and delivered individually.  With a NAPI context frames go
 * through GRO; otherwise they are delivered directly.
 */
mt76_rx_complete(struct mt76_dev * dev,struct sk_buff_head * frames,struct napi_struct * napi)1539 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1540 		      struct napi_struct *napi)
1541 {
1542 	struct ieee80211_sta *sta;
1543 	struct ieee80211_hw *hw;
1544 	struct sk_buff *skb, *tmp;
1545 #if defined(__linux__)
1546 	LIST_HEAD(list);
1547 #elif defined(__FreeBSD__)
1548 	LINUX_LIST_HEAD(list);
1549 #endif
1550 
1551 	spin_lock(&dev->rx_lock);
1552 	while ((skb = __skb_dequeue(frames)) != NULL) {
1553 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1554 
		/* Replay-check the head frame, then detach its A-MSDU
		 * chain before conversion.
		 */
1555 		mt76_check_ccmp_pn(skb);
1556 		skb_shinfo(skb)->frag_list = NULL;
1557 		mt76_rx_convert(dev, skb, &hw, &sta);
1558 		ieee80211_rx_list(hw, sta, skb, &list);
1559 
1560 		/* subsequent amsdu frames */
1561 		while (nskb) {
1562 			skb = nskb;
1563 			nskb = nskb->next;
1564 			skb->next = NULL;
1565 
1566 			mt76_rx_convert(dev, skb, &hw, &sta);
1567 			ieee80211_rx_list(hw, sta, skb, &list);
1568 		}
1569 	}
1570 	spin_unlock(&dev->rx_lock);
1571 
1572 	if (!napi) {
1573 		netif_receive_skb_list(&list);
1574 		return;
1575 	}
1576 
1577 	list_for_each_entry_safe(skb, tmp, &list, list) {
1578 		skb_list_del_init(skb);
1579 		napi_gro_receive(napi, skb);
1580 	}
1581 }
1582 
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1583 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1584 			   struct napi_struct *napi)
1585 {
1586 	struct sk_buff_head frames;
1587 	struct sk_buff *skb;
1588 
1589 	__skb_queue_head_init(&frames);
1590 
1591 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1592 		mt76_check_sta(dev, skb);
1593 		if (mtk_wed_device_active(&dev->mmio.wed) ||
1594 		    mt76_npu_device_active(dev))
1595 			__skb_queue_tail(&frames, skb);
1596 		else
1597 			mt76_rx_aggr_reorder(skb, &frames);
1598 	}
1599 
1600 	mt76_rx_complete(dev, &frames, napi);
1601 }
1602 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1603 
1604 static int
/* Create the driver-side station for @sta, wire its TX queues to the
 * assigned wcid index, and publish the wcid for RCU lookup.  Called
 * with the NOTEXIST->NONE mac80211 state transition.
 */
mt76_sta_add(struct mt76_phy * phy,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1605 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1606 	     struct ieee80211_sta *sta)
1607 {
1608 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1609 	struct mt76_dev *dev = phy->dev;
1610 	int ret;
1611 	int i;
1612 
1613 	mutex_lock(&dev->mutex);
1614 
	/* The driver allocates the wcid index in sta_add(). */
1615 	ret = dev->drv->sta_add(dev, vif, sta);
1616 	if (ret)
1617 		goto out;
1618 
1619 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1620 		struct mt76_txq *mtxq;
1621 
1622 		if (!sta->txq[i])
1623 			continue;
1624 
1625 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1626 		mtxq->wcid = wcid->idx;
1627 	}
1628 
1629 	ewma_signal_init(&wcid->rssi);
	/* Publish only after the wcid is fully set up. */
1630 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1631 	phy->num_sta++;
1632 
1633 	mt76_wcid_init(wcid, phy->band_idx);
1634 out:
1635 	mutex_unlock(&dev->mutex);
1636 
1637 	return ret;
1638 }
1639 
/* Tear down the driver-side station: stop all RX aggregation
 * sessions, let the driver release its resources, flush pending TX
 * state, and finally return the wcid index to the allocator.  Caller
 * must hold dev->mutex (see mt76_sta_remove()).
 */
__mt76_sta_remove(struct mt76_phy * phy,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1640 void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
1641 		       struct ieee80211_sta *sta)
1642 {
1643 	struct mt76_dev *dev = phy->dev;
1644 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1645 	int i, idx = wcid->idx;
1646 
1647 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1648 		mt76_rx_aggr_stop(dev, wcid, i);
1649 
1650 	if (dev->drv->sta_remove)
1651 		dev->drv->sta_remove(dev, vif, sta);
1652 
1653 	mt76_wcid_cleanup(dev, wcid);
1654 
	/* Free the index last, after all users are gone. */
1655 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1656 	phy->num_sta--;
1657 }
1658 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1659 
1660 static void
/* Locked wrapper around __mt76_sta_remove() for the NONE->NOTEXIST
 * mac80211 state transition.
 */
mt76_sta_remove(struct mt76_phy * phy,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1661 mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
1662 		struct ieee80211_sta *sta)
1663 {
1664 	struct mt76_dev *dev = phy->dev;
1665 
1666 	mutex_lock(&dev->mutex);
1667 	__mt76_sta_remove(phy, vif, sta);
1668 	mutex_unlock(&dev->mutex);
1669 }
1670 
mt76_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)1671 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1672 		   struct ieee80211_sta *sta,
1673 		   enum ieee80211_sta_state old_state,
1674 		   enum ieee80211_sta_state new_state)
1675 {
1676 	struct mt76_phy *phy = hw->priv;
1677 	struct mt76_dev *dev = phy->dev;
1678 	enum mt76_sta_event ev;
1679 
1680 	phy = mt76_vif_phy(hw, vif);
1681 	if (!phy)
1682 		return -EINVAL;
1683 
1684 	if (old_state == IEEE80211_STA_NOTEXIST &&
1685 	    new_state == IEEE80211_STA_NONE)
1686 		return mt76_sta_add(phy, vif, sta);
1687 
1688 	if (old_state == IEEE80211_STA_NONE &&
1689 	    new_state == IEEE80211_STA_NOTEXIST)
1690 		mt76_sta_remove(phy, vif, sta);
1691 
1692 	if (!dev->drv->sta_event)
1693 		return 0;
1694 
1695 	if (old_state == IEEE80211_STA_AUTH &&
1696 	    new_state == IEEE80211_STA_ASSOC)
1697 		ev = MT76_STA_EVENT_ASSOC;
1698 	else if (old_state == IEEE80211_STA_ASSOC &&
1699 		 new_state == IEEE80211_STA_AUTHORIZED)
1700 		ev = MT76_STA_EVENT_AUTHORIZE;
1701 	else if (old_state == IEEE80211_STA_ASSOC &&
1702 		 new_state == IEEE80211_STA_AUTH)
1703 		ev = MT76_STA_EVENT_DISASSOC;
1704 	else
1705 		return 0;
1706 
1707 	return dev->drv->sta_event(dev, vif, sta, ev);
1708 }
1709 EXPORT_SYMBOL_GPL(mt76_sta_state);
1710 
/* mac80211 sta_pre_rcu_remove callback: unpublish the wcid so no new
 * RCU lookups can find it before the station is torn down.  The
 * status_lock nests inside dev->mutex here to also fence the TX
 * status path.
 */
mt76_sta_pre_rcu_remove(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1711 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1712 			     struct ieee80211_sta *sta)
1713 {
1714 	struct mt76_phy *phy = hw->priv;
1715 	struct mt76_dev *dev = phy->dev;
1716 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1717 
1718 	mutex_lock(&dev->mutex);
1719 	spin_lock_bh(&dev->status_lock);
1720 	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1721 	spin_unlock_bh(&dev->status_lock);
1722 	mutex_unlock(&dev->mutex);
1723 }
1724 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1725 
/* Prepare a freshly allocated wcid for use on band @band_idx: no
 * hardware key yet, empty TX queues and empty bookkeeping lists.
 */
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
	wcid->phy_idx = band_idx;
	wcid->hw_key_idx = -1;

	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);
	INIT_LIST_HEAD(&wcid->tx_list);
	INIT_LIST_HEAD(&wcid->list);
	INIT_LIST_HEAD(&wcid->poll_list);

	idr_init(&wcid->pktid);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1741 
/* Flush all TX state associated with @wcid: collect frames awaiting
 * TX status, splice out everything still queued for transmission
 * (including off-channel frames), and free them all.  Note the lock
 * nesting: the per-queue skb locks nest inside phy->tx_lock.
 */
mt76_wcid_cleanup(struct mt76_dev * dev,struct mt76_wcid * wcid)1742 void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
1743 {
1744 	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
1745 	struct ieee80211_hw *hw;
1746 	struct sk_buff_head list;
1747 	struct sk_buff *skb;
1748 
	/* Pull any frames still waiting for TX status into 'list'. */
1749 	mt76_tx_status_lock(dev, &list);
1750 	mt76_tx_status_skb_get(dev, wcid, -1, &list);
1751 	mt76_tx_status_unlock(dev, &list);
1752 
1753 	idr_destroy(&wcid->pktid);
1754 
1755 	spin_lock_bh(&phy->tx_lock);
1756 
1757 	if (!list_empty(&wcid->tx_list))
1758 		list_del_init(&wcid->tx_list);
1759 
1760 	spin_lock(&wcid->tx_pending.lock);
1761 	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
1762 	spin_unlock(&wcid->tx_pending.lock);
1763 
1764 	spin_lock(&wcid->tx_offchannel.lock);
1765 	skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
1766 	spin_unlock(&wcid->tx_offchannel.lock);
1767 
1768 	spin_unlock_bh(&phy->tx_lock);
1769 
	/* Free everything outside the locks. */
1770 	while ((skb = __skb_dequeue(&list)) != NULL) {
1771 		hw = mt76_tx_status_get_hw(dev, skb);
1772 		ieee80211_free_txskb(hw, skb);
1773 	}
1774 }
1775 EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1776 
mt76_wcid_add_poll(struct mt76_dev * dev,struct mt76_wcid * wcid)1777 void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
1778 {
1779 	if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
1780 		return;
1781 
1782 	spin_lock_bh(&dev->sta_poll_lock);
1783 	if (list_empty(&wcid->poll_list))
1784 		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
1785 	spin_unlock_bh(&dev->sta_poll_lock);
1786 }
1787 EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1788 
/* Convert a per-connection TX power limit (dBm) to the device's
 * half-dB units, applying any SAR restriction for the current channel
 * and subtracting the combining gain of multiple TX chains.
 */
s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
{
	s8 bound;

	bound = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);

	return bound - mt76_tx_power_path_delta(hweight16(phy->chainmask));
}
EXPORT_SYMBOL_GPL(mt76_get_power_bound);
1799 
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,unsigned int link_id,int * dbm)1800 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1801 		     unsigned int link_id, int *dbm)
1802 {
1803 	struct mt76_phy *phy = mt76_vif_phy(hw, vif);
1804 	int n_chains, delta;
1805 
1806 	if (!phy)
1807 		return -EINVAL;
1808 
1809 	n_chains = hweight16(phy->chainmask);
1810 	delta = mt76_tx_power_path_delta(n_chains);
1811 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1812 
1813 	return 0;
1814 }
1815 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1816 
mt76_init_sar_power(struct ieee80211_hw * hw,const struct cfg80211_sar_specs * sar)1817 int mt76_init_sar_power(struct ieee80211_hw *hw,
1818 			const struct cfg80211_sar_specs *sar)
1819 {
1820 	struct mt76_phy *phy = hw->priv;
1821 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1822 	int i;
1823 
1824 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1825 		return -EINVAL;
1826 
1827 	for (i = 0; i < sar->num_sub_specs; i++) {
1828 		u32 index = sar->sub_specs[i].freq_range_index;
1829 		/* SAR specifies power limitaton in 0.25dbm */
1830 		s32 power = sar->sub_specs[i].power >> 1;
1831 
1832 		if (power > 127 || power < -127)
1833 			power = 127;
1834 
1835 		phy->frp[index].range = &capa->freq_ranges[index];
1836 		phy->frp[index].power = power;
1837 	}
1838 
1839 	return 0;
1840 }
1841 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1842 
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1843 int mt76_get_sar_power(struct mt76_phy *phy,
1844 		       struct ieee80211_channel *chan,
1845 		       int power)
1846 {
1847 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1848 	int freq, i;
1849 
1850 	if (!capa || !phy->frp)
1851 		return power;
1852 
1853 	if (power > 127 || power < -127)
1854 		power = 127;
1855 
1856 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1857 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1858 		if (phy->frp[i].range &&
1859 		    freq >= phy->frp[i].range->start_freq &&
1860 		    freq < phy->frp[i].range->end_freq) {
1861 			power = min_t(int, phy->frp[i].power, power);
1862 			break;
1863 		}
1864 	}
1865 
1866 	return power;
1867 }
1868 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1869 
1870 static void
__mt76_csa_finish(void * priv,u8 * mac,struct ieee80211_vif * vif)1871 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1872 {
1873 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1874 		ieee80211_csa_finish(vif, 0);
1875 }
1876 
mt76_csa_finish(struct mt76_dev * dev)1877 void mt76_csa_finish(struct mt76_dev *dev)
1878 {
1879 	if (!dev->csa_complete)
1880 		return;
1881 
1882 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1883 		IEEE80211_IFACE_ITER_RESUME_ALL,
1884 		__mt76_csa_finish, dev);
1885 
1886 	dev->csa_complete = 0;
1887 }
1888 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1889 
1890 static void
__mt76_csa_check(void * priv,u8 * mac,struct ieee80211_vif * vif)1891 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1892 {
1893 	struct mt76_dev *dev = priv;
1894 
1895 	if (!vif->bss_conf.csa_active)
1896 		return;
1897 
1898 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1899 }
1900 
/* Poll all active interfaces for a completed CSA beacon countdown;
 * sets dev->csa_complete for mt76_csa_finish() to act on.
 */
mt76_csa_check(struct mt76_dev * dev)1901 void mt76_csa_check(struct mt76_dev *dev)
1902 {
1903 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1904 		IEEE80211_IFACE_ITER_RESUME_ALL,
1905 		__mt76_csa_check, dev);
1906 }
1907 EXPORT_SYMBOL_GPL(mt76_csa_check);
1908 
1909 int
/* mac80211 set_tim callback stub: the TIM bitmap is maintained by the
 * firmware/driver beacon path, so nothing to do here.
 */
mt76_set_tim(struct ieee80211_hw * hw,struct ieee80211_sta * sta,bool set)1910 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1911 {
1912 	return 0;
1913 }
1914 EXPORT_SYMBOL_GPL(mt76_set_tim);
1915 
/* Re-insert the 8-byte CCMP header (PN + key id) that the hardware
 * stripped on RX, using the PN saved in status->iv, so mac80211 sees
 * a complete frame.  Clears RX_FLAG_IV_STRIPPED accordingly.
 */
mt76_insert_ccmp_hdr(struct sk_buff * skb,u8 key_id)1916 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1917 {
1918 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1919 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1920 	u8 *hdr, *pn = status->iv;
1921 
	/* Make room in front, slide the 802.11 header down, and build
	 * the CCMP header right behind it.
	 */
1922 	__skb_push(skb, 8);
1923 	memmove(skb->data, skb->data + 8, hdr_len);
1924 	hdr = skb->data + hdr_len;
1925 
	/* CCMP header layout per IEEE 802.11: PN0, PN1, rsvd,
	 * ExtIV|KeyID, PN2..PN5 (status->iv holds the PN big-endian).
	 */
1926 	hdr[0] = pn[5];
1927 	hdr[1] = pn[4];
1928 	hdr[2] = 0;
1929 	hdr[3] = 0x20 | (key_id << 6);
1930 	hdr[4] = pn[3];
1931 	hdr[5] = pn[2];
1932 	hdr[6] = pn[1];
1933 	hdr[7] = pn[0];
1934 
1935 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1936 }
1937 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1938 
/* Translate a hardware rate index into an index into
 * @sband->bitrates.
 *
 * CCK only exists on 2.4 GHz; for CCK the short-preamble bit (BIT(2))
 * is masked off first, and other bands return 0.  For OFDM on
 * 2.4 GHz the leading four CCK entries are skipped.  Falls back to
 * rate index 0 when nothing matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int start = 0;
	int i;

	if (cck) {
		if (sband->band != NL80211_BAND_2GHZ)
			return 0;

		idx &= ~BIT(2); /* strip the short-preamble flag */
	} else if (sband->band == NL80211_BAND_2GHZ) {
		start = 4; /* skip the CCK entries */
	}

	for (i = start; i < sband->n_bitrates; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1963 
/* mac80211 sw_scan_start callback: just flag the phy as scanning so
 * other paths (e.g. DFS state) can take it into account.
 */
mt76_sw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif,const u8 * mac)1964 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1965 		  const u8 *mac)
1966 {
1967 	struct mt76_phy *phy = hw->priv;
1968 
1969 	set_bit(MT76_SCANNING, &phy->state);
1970 }
1971 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1972 
/* mac80211 sw_scan_complete callback: clear the scanning flag set by
 * mt76_sw_scan().
 */
mt76_sw_scan_complete(struct ieee80211_hw * hw,struct ieee80211_vif * vif)1973 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1974 {
1975 	struct mt76_phy *phy = hw->priv;
1976 
1977 	clear_bit(MT76_SCANNING, &phy->state);
1978 }
1979 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1980 
/* Report the antenna mask for @hw as the union of the chainmasks of
 * all phys attached to it.  TX and RX masks are identical on this
 * hardware.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
		     u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	u32 mask = 0;
	int i;

	mutex_lock(&dev->mutex);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *cur = dev->phys[i];

		if (cur && cur->hw == hw)
			mask |= cur->chainmask;
	}
	mutex_unlock(&dev->mutex);

	*tx_ant = mask;
	*rx_ant = mask;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1999 
2000 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base,void * wed,u32 flags)2001 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
2002 		int ring_base, void *wed, u32 flags)
2003 {
2004 	struct mt76_queue *hwq;
2005 	int err;
2006 
2007 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
2008 	if (!hwq)
2009 		return ERR_PTR(-ENOMEM);
2010 
2011 	hwq->flags = flags;
2012 	hwq->wed = wed;
2013 
2014 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
2015 	if (err < 0)
2016 		return ERR_PTR(err);
2017 
2018 	return hwq;
2019 }
2020 EXPORT_SYMBOL_GPL(mt76_init_queue);
2021 
/* Accumulate one station's TX statistics into the ethtool data array
 * at wi->initial_stat_idx.  The slot layout (modes, bandwidths, MCS,
 * NSS) must match the string table the driver registers; @eht selects
 * the extended layout with EHT modes and 14 MCS entries.
 */
mt76_ethtool_worker(struct mt76_ethtool_worker_info * wi,struct mt76_sta_stats * stats,bool eht)2022 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
2023 			 struct mt76_sta_stats *stats, bool eht)
2024 {
2025 	int i, ei = wi->initial_stat_idx;
2026 	u64 *data = wi->data;
2027 
2028 	wi->sta_count++;
2029 
2030 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
2031 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
2032 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
2033 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
2034 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
2035 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
2036 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
2037 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
2038 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
2039 	if (eht) {
2040 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
2041 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
2042 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
2043 	}
2044 
	/* Non-EHT layouts have one fewer bandwidth bucket. */
2045 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
2046 		data[ei++] += stats->tx_bw[i];
2047 
	/* 14 MCS entries with EHT, 12 otherwise. */
2048 	for (i = 0; i < (eht ? 14 : 12); i++)
2049 		data[ei++] += stats->tx_mcs[i];
2050 
2051 	for (i = 0; i < 4; i++)
2052 		data[ei++] += stats->tx_nss[i];
2053 
2054 	wi->worker_stat_count = ei - wi->initial_stat_idx;
2055 }
2056 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
2057 
/*
 * mt76_ethtool_page_pool_stats - append page-pool counters to an ethtool dump
 * @dev: mt76 device
 * @data: ethtool stats array to fill
 * @index: running slot index, advanced by the number of stats emitted
 *
 * Aggregates the page-pool statistics of every RX queue into one summary
 * and writes it via the core page_pool ethtool helpers.  Compiles to a
 * no-op when CONFIG_PAGE_POOL_STATS is not set.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int qid;

	/* Sum the counters of all RX queues into a single snapshot. */
	mt76_for_each_q_rx(dev, qid)
		page_pool_get_stats(dev->q_rx[qid].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
2072 
mt76_phy_dfs_state(struct mt76_phy * phy)2073 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
2074 {
2075 	struct ieee80211_hw *hw = phy->hw;
2076 	struct mt76_dev *dev = phy->dev;
2077 
2078 	if (dev->region == NL80211_DFS_UNSET ||
2079 	    test_bit(MT76_SCANNING, &phy->state))
2080 		return MT_DFS_STATE_DISABLED;
2081 
2082 	if (!phy->radar_enabled) {
2083 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
2084 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
2085 			return MT_DFS_STATE_ACTIVE;
2086 
2087 		return MT_DFS_STATE_DISABLED;
2088 	}
2089 
2090 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
2091 		return MT_DFS_STATE_CAC;
2092 
2093 	return MT_DFS_STATE_ACTIVE;
2094 }
2095 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
2096 
mt76_vif_cleanup(struct mt76_dev * dev,struct ieee80211_vif * vif)2097 void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
2098 {
2099 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
2100 	struct mt76_vif_data *mvif = mlink->mvif;
2101 
2102 	rcu_assign_pointer(mvif->link[0], NULL);
2103 	mt76_abort_scan(dev);
2104 	if (mvif->roc_phy)
2105 		mt76_abort_roc(mvif->roc_phy);
2106 }
2107 EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2108 
mt76_select_links(struct ieee80211_vif * vif,int max_active_links)2109 u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links)
2110 {
2111 	unsigned long usable_links = ieee80211_vif_usable_links(vif);
2112 	struct  {
2113 		u8 link_id;
2114 		enum nl80211_band band;
2115 	} data[IEEE80211_MLD_MAX_NUM_LINKS];
2116 	unsigned int link_id;
2117 	int i, n_data = 0;
2118 	u16 sel_links = 0;
2119 
2120 	if (!ieee80211_vif_is_mld(vif))
2121 		return 0;
2122 
2123 	if (vif->active_links == usable_links)
2124 		return vif->active_links;
2125 
2126 	rcu_read_lock();
2127 	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
2128 		struct ieee80211_bss_conf *link_conf;
2129 
2130 		link_conf = rcu_dereference(vif->link_conf[link_id]);
2131 		if (WARN_ON_ONCE(!link_conf))
2132 			continue;
2133 
2134 		data[n_data].link_id = link_id;
2135 		data[n_data].band = link_conf->chanreq.oper.chan->band;
2136 		n_data++;
2137 	}
2138 	rcu_read_unlock();
2139 
2140 	for (i = 0; i < n_data; i++) {
2141 		int j;
2142 
2143 		if (!(BIT(data[i].link_id) & vif->active_links))
2144 			continue;
2145 
2146 		sel_links = BIT(data[i].link_id);
2147 		for (j = 0; j < n_data; j++) {
2148 			if (data[i].band != data[j].band) {
2149 				sel_links |= BIT(data[j].link_id);
2150 				if (hweight16(sel_links) == max_active_links)
2151 					break;
2152 			}
2153 		}
2154 		break;
2155 	}
2156 
2157 	return sel_links;
2158 }
2159 EXPORT_SYMBOL_GPL(mt76_select_links);
2160