xref: /freebsd/sys/contrib/dev/mediatek/mt76/mac80211.c (revision 14b53301e8d482654f94c23e6884fe96b3d26825)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #if defined(CONFIG_OF)
7 #include <linux/of.h>
8 #endif
9 #if defined(__FreeBSD__)
10 #include <linux/math64.h>
11 #include <linux/numa.h>
12 #endif
13 #include "mt76.h"
14 
15 #define CHAN2G(_idx, _freq) {			\
16 	.band = NL80211_BAND_2GHZ,		\
17 	.center_freq = (_freq),			\
18 	.hw_value = (_idx),			\
19 	.max_power = 30,			\
20 }
21 
22 #define CHAN5G(_idx, _freq) {			\
23 	.band = NL80211_BAND_5GHZ,		\
24 	.center_freq = (_freq),			\
25 	.hw_value = (_idx),			\
26 	.max_power = 30,			\
27 }
28 
29 #define CHAN6G(_idx, _freq) {			\
30 	.band = NL80211_BAND_6GHZ,		\
31 	.center_freq = (_freq),			\
32 	.hw_value = (_idx),			\
33 	.max_power = 30,			\
34 }
35 
36 static const struct ieee80211_channel mt76_channels_2ghz[] = {
37 	CHAN2G(1, 2412),
38 	CHAN2G(2, 2417),
39 	CHAN2G(3, 2422),
40 	CHAN2G(4, 2427),
41 	CHAN2G(5, 2432),
42 	CHAN2G(6, 2437),
43 	CHAN2G(7, 2442),
44 	CHAN2G(8, 2447),
45 	CHAN2G(9, 2452),
46 	CHAN2G(10, 2457),
47 	CHAN2G(11, 2462),
48 	CHAN2G(12, 2467),
49 	CHAN2G(13, 2472),
50 	CHAN2G(14, 2484),
51 };
52 
53 static const struct ieee80211_channel mt76_channels_5ghz[] = {
54 	CHAN5G(36, 5180),
55 	CHAN5G(40, 5200),
56 	CHAN5G(44, 5220),
57 	CHAN5G(48, 5240),
58 
59 	CHAN5G(52, 5260),
60 	CHAN5G(56, 5280),
61 	CHAN5G(60, 5300),
62 	CHAN5G(64, 5320),
63 
64 	CHAN5G(100, 5500),
65 	CHAN5G(104, 5520),
66 	CHAN5G(108, 5540),
67 	CHAN5G(112, 5560),
68 	CHAN5G(116, 5580),
69 	CHAN5G(120, 5600),
70 	CHAN5G(124, 5620),
71 	CHAN5G(128, 5640),
72 	CHAN5G(132, 5660),
73 	CHAN5G(136, 5680),
74 	CHAN5G(140, 5700),
75 	CHAN5G(144, 5720),
76 
77 	CHAN5G(149, 5745),
78 	CHAN5G(153, 5765),
79 	CHAN5G(157, 5785),
80 	CHAN5G(161, 5805),
81 	CHAN5G(165, 5825),
82 	CHAN5G(169, 5845),
83 	CHAN5G(173, 5865),
84 	CHAN5G(177, 5885),
85 };
86 
87 static const struct ieee80211_channel mt76_channels_6ghz[] = {
88 	/* UNII-5 */
89 	CHAN6G(1, 5955),
90 	CHAN6G(5, 5975),
91 	CHAN6G(9, 5995),
92 	CHAN6G(13, 6015),
93 	CHAN6G(17, 6035),
94 	CHAN6G(21, 6055),
95 	CHAN6G(25, 6075),
96 	CHAN6G(29, 6095),
97 	CHAN6G(33, 6115),
98 	CHAN6G(37, 6135),
99 	CHAN6G(41, 6155),
100 	CHAN6G(45, 6175),
101 	CHAN6G(49, 6195),
102 	CHAN6G(53, 6215),
103 	CHAN6G(57, 6235),
104 	CHAN6G(61, 6255),
105 	CHAN6G(65, 6275),
106 	CHAN6G(69, 6295),
107 	CHAN6G(73, 6315),
108 	CHAN6G(77, 6335),
109 	CHAN6G(81, 6355),
110 	CHAN6G(85, 6375),
111 	CHAN6G(89, 6395),
112 	CHAN6G(93, 6415),
113 	/* UNII-6 */
114 	CHAN6G(97, 6435),
115 	CHAN6G(101, 6455),
116 	CHAN6G(105, 6475),
117 	CHAN6G(109, 6495),
118 	CHAN6G(113, 6515),
119 	CHAN6G(117, 6535),
120 	/* UNII-7 */
121 	CHAN6G(121, 6555),
122 	CHAN6G(125, 6575),
123 	CHAN6G(129, 6595),
124 	CHAN6G(133, 6615),
125 	CHAN6G(137, 6635),
126 	CHAN6G(141, 6655),
127 	CHAN6G(145, 6675),
128 	CHAN6G(149, 6695),
129 	CHAN6G(153, 6715),
130 	CHAN6G(157, 6735),
131 	CHAN6G(161, 6755),
132 	CHAN6G(165, 6775),
133 	CHAN6G(169, 6795),
134 	CHAN6G(173, 6815),
135 	CHAN6G(177, 6835),
136 	CHAN6G(181, 6855),
137 	CHAN6G(185, 6875),
138 	/* UNII-8 */
139 	CHAN6G(189, 6895),
140 	CHAN6G(193, 6915),
141 	CHAN6G(197, 6935),
142 	CHAN6G(201, 6955),
143 	CHAN6G(205, 6975),
144 	CHAN6G(209, 6995),
145 	CHAN6G(213, 7015),
146 	CHAN6G(217, 7035),
147 	CHAN6G(221, 7055),
148 	CHAN6G(225, 7075),
149 	CHAN6G(229, 7095),
150 	CHAN6G(233, 7115),
151 };
152 
153 #if defined(CONFIG_MT76_LEDS)
154 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
155 	{ .throughput =   0 * 1024, .blink_time = 334 },
156 	{ .throughput =   1 * 1024, .blink_time = 260 },
157 	{ .throughput =   5 * 1024, .blink_time = 220 },
158 	{ .throughput =  10 * 1024, .blink_time = 190 },
159 	{ .throughput =  20 * 1024, .blink_time = 170 },
160 	{ .throughput =  50 * 1024, .blink_time = 150 },
161 	{ .throughput =  70 * 1024, .blink_time = 130 },
162 	{ .throughput = 100 * 1024, .blink_time = 110 },
163 	{ .throughput = 200 * 1024, .blink_time =  80 },
164 	{ .throughput = 300 * 1024, .blink_time =  50 },
165 };
166 #endif
167 
168 struct ieee80211_rate mt76_rates[] = {
169 	CCK_RATE(0, 10),
170 	CCK_RATE(1, 20),
171 	CCK_RATE(2, 55),
172 	CCK_RATE(3, 110),
173 	OFDM_RATE(11, 60),
174 	OFDM_RATE(15, 90),
175 	OFDM_RATE(10, 120),
176 	OFDM_RATE(14, 180),
177 	OFDM_RATE(9,  240),
178 	OFDM_RATE(13, 360),
179 	OFDM_RATE(8,  480),
180 	OFDM_RATE(12, 540),
181 };
182 EXPORT_SYMBOL_GPL(mt76_rates);
183 
184 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
185 	{ .start_freq = 2402, .end_freq = 2494, },
186 	{ .start_freq = 5150, .end_freq = 5350, },
187 	{ .start_freq = 5350, .end_freq = 5470, },
188 	{ .start_freq = 5470, .end_freq = 5725, },
189 	{ .start_freq = 5725, .end_freq = 5950, },
190 	{ .start_freq = 5945, .end_freq = 6165, },
191 	{ .start_freq = 6165, .end_freq = 6405, },
192 	{ .start_freq = 6405, .end_freq = 6525, },
193 	{ .start_freq = 6525, .end_freq = 6705, },
194 	{ .start_freq = 6705, .end_freq = 6865, },
195 	{ .start_freq = 6865, .end_freq = 7125, },
196 };
197 
198 static const struct cfg80211_sar_capa mt76_sar_capa = {
199 	.type = NL80211_SAR_TYPE_POWER,
200 	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
201 	.freq_ranges = &mt76_sar_freq_ranges[0],
202 };
203 
204 #if defined(CONFIG_MT76_LEDS)
205 static int mt76_led_init(struct mt76_phy *phy)
206 {
207 	struct mt76_dev *dev = phy->dev;
208 	struct ieee80211_hw *hw = phy->hw;
209 	struct device_node *np = dev->dev->of_node;
210 
211 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
212 		return 0;
213 
214 	np = of_get_child_by_name(np, "led");
215 	if (np) {
216 		if (!of_device_is_available(np)) {
217 			of_node_put(np);
218 			dev_info(dev->dev,
219 				"led registration was explicitly disabled by dts\n");
220 			return 0;
221 		}
222 
223 		if (phy == &dev->phy) {
224 			int led_pin;
225 
226 			if (!of_property_read_u32(np, "led-sources", &led_pin))
227 				phy->leds.pin = led_pin;
228 
229 			phy->leds.al =
230 				of_property_read_bool(np, "led-active-low");
231 		}
232 
233 		of_node_put(np);
234 	}
235 
236 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
237 		 wiphy_name(hw->wiphy));
238 
239 	phy->leds.cdev.name = phy->leds.name;
240 	phy->leds.cdev.default_trigger =
241 		ieee80211_create_tpt_led_trigger(hw,
242 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
243 					mt76_tpt_blink,
244 					ARRAY_SIZE(mt76_tpt_blink));
245 
246 	dev_info(dev->dev,
247 		"registering led '%s'\n", phy->leds.name);
248 
249 	return led_classdev_register(dev->dev, &phy->leds.cdev);
250 }
251 
252 static void mt76_led_cleanup(struct mt76_phy *phy)
253 {
254 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
255 		return;
256 
257 	led_classdev_unregister(&phy->leds.cdev);
258 }
259 #endif
260 
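/*
 * Derive HT/VHT stream capabilities from phy->antenna_mask: TX STBC is
 * advertised only when more than one chain is available, and one RX MCS
 * set is enabled per spatial stream (VHT MCS 0-9 per stream, remaining
 * streams marked as not supported).
 */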
261 static void mt76_init_stream_cap(struct mt76_phy *phy,
262 				 struct ieee80211_supported_band *sband,
263 				 bool vht)
264 {
265 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
266 	int i, nstream = hweight8(phy->antenna_mask);
267 	struct ieee80211_sta_vht_cap *vht_cap;
268 	u16 mcs_map = 0;
269 
270 	if (nstream > 1)
271 		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
272 	else
273 		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
274 
275 	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
276 		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
277 
278 	if (!vht)
279 		return;
280 
281 	vht_cap = &sband->vht_cap;
282 	if (nstream > 1)
283 		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
284 	else
285 		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
286 	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
287 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
288 
289 	for (i = 0; i < 8; i++) {
290 		if (i < nstream)
291 			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
292 		else
293 			mcs_map |=
294 				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
295 	}
296 	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
297 	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
298 	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
299 		vht_cap->vht_mcs.tx_highest |=
300 				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
301 }
302 
303 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
304 {
305 	if (phy->cap.has_2ghz)
306 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
307 	if (phy->cap.has_5ghz)
308 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
309 	if (phy->cap.has_6ghz)
310 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
311 }
312 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
313 
314 static int
315 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
316 		const struct ieee80211_channel *chan, int n_chan,
317 		struct ieee80211_rate *rates, int n_rates,
318 		bool ht, bool vht)
319 {
320 	struct ieee80211_supported_band *sband = &msband->sband;
321 	struct ieee80211_sta_vht_cap *vht_cap;
322 	struct ieee80211_sta_ht_cap *ht_cap;
323 	struct mt76_dev *dev = phy->dev;
324 	void *chanlist;
325 	int size;
326 
327 	size = n_chan * sizeof(*chan);
328 	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
329 	if (!chanlist)
330 		return -ENOMEM;
331 
332 	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
333 				    GFP_KERNEL);
334 	if (!msband->chan)
335 		return -ENOMEM;
336 
337 	sband->channels = chanlist;
338 	sband->n_channels = n_chan;
339 	sband->bitrates = rates;
340 	sband->n_bitrates = n_rates;
341 
342 	if (!ht)
343 		return 0;
344 
345 	ht_cap = &sband->ht_cap;
346 	ht_cap->ht_supported = true;
347 	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
348 		       IEEE80211_HT_CAP_GRN_FLD |
349 		       IEEE80211_HT_CAP_SGI_20 |
350 		       IEEE80211_HT_CAP_SGI_40 |
351 		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
352 
353 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
354 	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
355 
356 	mt76_init_stream_cap(phy, sband, vht);
357 
358 	if (!vht)
359 		return 0;
360 
361 	vht_cap = &sband->vht_cap;
362 	vht_cap->vht_supported = true;
363 	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
364 			IEEE80211_VHT_CAP_RXSTBC_1 |
365 			IEEE80211_VHT_CAP_SHORT_GI_80 |
366 			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
367 
368 	return 0;
369 }
370 
371 static int
372 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
373 		   int n_rates)
374 {
375 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
376 
377 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
378 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
379 			       n_rates, true, false);
380 }
381 
382 static int
383 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
384 		   int n_rates, bool vht)
385 {
386 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
387 
388 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
389 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
390 			       n_rates, true, vht);
391 }
392 
393 static int
394 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
395 		   int n_rates)
396 {
397 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
398 
399 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
400 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
401 			       n_rates, false, false);
402 }
403 
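/*
 * If every channel in a band ended up disabled (e.g. by regulatory or
 * device-tree frequency limits), zero n_channels and detach the sband
 * from the wiphy. Otherwise create a default HT20 chandef on the band's
 * first channel and record this phy as the owner of the band.
 */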
404 static void
405 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
406 		 enum nl80211_band band)
407 {
408 	struct ieee80211_supported_band *sband = &msband->sband;
409 	bool found = false;
410 	int i;
411 
412 	if (!sband)
413 		return;
414 
415 	for (i = 0; i < sband->n_channels; i++) {
416 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
417 			continue;
418 
419 		found = true;
420 		break;
421 	}
422 
423 	if (found) {
424 		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
425 					NL80211_CHAN_HT20);
426 		phy->chan_state = &msband->chan[0];
427 		phy->dev->band_phys[band] = phy;
428 		return;
429 	}
430 
431 	sband->n_channels = 0;
432 	if (phy->hw->wiphy->bands[band] == sband)
433 		phy->hw->wiphy->bands[band] = NULL;
434 }
435 
436 static int
437 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
438 {
439 	struct mt76_dev *dev = phy->dev;
440 	struct wiphy *wiphy = hw->wiphy;
441 
442 	INIT_LIST_HEAD(&phy->tx_list);
443 	spin_lock_init(&phy->tx_lock);
444 	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);
445 
446 	if ((void *)phy != hw->priv)
447 		return 0;
448 
449 	SET_IEEE80211_DEV(hw, dev->dev);
450 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
451 
452 	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
453 			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
454 	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
455 			WIPHY_FLAG_SUPPORTS_TDLS |
456 			WIPHY_FLAG_AP_UAPSD;
457 
458 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
459 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
460 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
461 
462 	if (!wiphy->available_antennas_tx)
463 		wiphy->available_antennas_tx = phy->antenna_mask;
464 	if (!wiphy->available_antennas_rx)
465 		wiphy->available_antennas_rx = phy->antenna_mask;
466 
467 	wiphy->sar_capa = &mt76_sar_capa;
468 	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
469 				sizeof(struct mt76_freq_range_power),
470 				GFP_KERNEL);
471 	if (!phy->frp)
472 		return -ENOMEM;
473 
474 	hw->txq_data_size = sizeof(struct mt76_txq);
475 	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
476 
477 	if (!hw->max_tx_fragments)
478 		hw->max_tx_fragments = 16;
479 
480 	ieee80211_hw_set(hw, SIGNAL_DBM);
481 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
482 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
483 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
484 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
485 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
486 	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
487 	ieee80211_hw_set(hw, SPECTRUM_MGMT);
488 
489 	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
490 	    hw->max_tx_fragments > 1) {
491 		ieee80211_hw_set(hw, TX_AMSDU);
492 		ieee80211_hw_set(hw, TX_FRAG_LIST);
493 	}
494 
495 	ieee80211_hw_set(hw, MFP_CAPABLE);
496 	ieee80211_hw_set(hw, AP_LINK_PS);
497 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
498 
499 	return 0;
500 }
501 
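/*
 * Allocate a phy for an additional radio that shares the primary
 * ieee80211_hw: the driver private area of 'size' bytes is placed
 * directly behind the 8-byte-aligned struct mt76_phy.
 */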
502 struct mt76_phy *
503 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
504 		     u8 band_idx)
505 {
506 	struct ieee80211_hw *hw = dev->phy.hw;
507 	unsigned int phy_size;
508 	struct mt76_phy *phy;
509 
510 	phy_size = ALIGN(sizeof(*phy), 8);
511 	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
512 	if (!phy)
513 		return NULL;
514 
515 	phy->dev = dev;
516 	phy->hw = hw;
517 #if defined(__linux__)
518 	phy->priv = (void *)phy + phy_size;
519 #elif defined(__FreeBSD__)
520 	phy->priv = (u8 *)phy + phy_size;
521 #endif
522 	phy->band_idx = band_idx;
523 
524 	return phy;
525 }
526 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
527 
528 struct mt76_phy *
529 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
530 	       const struct ieee80211_ops *ops, u8 band_idx)
531 {
532 	struct ieee80211_hw *hw;
533 	unsigned int phy_size;
534 	struct mt76_phy *phy;
535 
536 	phy_size = ALIGN(sizeof(*phy), 8);
537 	hw = ieee80211_alloc_hw(size + phy_size, ops);
538 	if (!hw)
539 		return NULL;
540 
541 	phy = hw->priv;
542 	phy->dev = dev;
543 	phy->hw = hw;
544 #if defined(__linux__)
545 	phy->priv = hw->priv + phy_size;
546 #elif defined(__FreeBSD__)
547 	phy->priv = (u8 *)hw->priv + phy_size;
548 #endif
549 	phy->band_idx = band_idx;
550 
551 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
552 	hw->wiphy->interface_modes =
553 		BIT(NL80211_IFTYPE_STATION) |
554 		BIT(NL80211_IFTYPE_AP) |
555 #ifdef CONFIG_MAC80211_MESH
556 		BIT(NL80211_IFTYPE_MESH_POINT) |
557 #endif
558 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
559 		BIT(NL80211_IFTYPE_P2P_GO) |
560 		BIT(NL80211_IFTYPE_ADHOC);
561 
562 	return phy;
563 }
564 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
565 
566 int mt76_register_phy(struct mt76_phy *phy, bool vht,
567 		      struct ieee80211_rate *rates, int n_rates)
568 {
569 	int ret;
570 
571 	ret = mt76_phy_init(phy, phy->hw);
572 	if (ret)
573 		return ret;
574 
575 	if (phy->cap.has_2ghz) {
576 		ret = mt76_init_sband_2g(phy, rates, n_rates);
577 		if (ret)
578 			return ret;
579 	}
580 
581 	if (phy->cap.has_5ghz) {
582 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
583 		if (ret)
584 			return ret;
585 	}
586 
587 	if (phy->cap.has_6ghz) {
588 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
589 		if (ret)
590 			return ret;
591 	}
592 
593 #if defined(CONFIG_MT76_LEDS)
594 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
595 		ret = mt76_led_init(phy);
596 		if (ret)
597 			return ret;
598 	}
599 #endif
600 
601 	wiphy_read_of_freq_limits(phy->hw->wiphy);
602 	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
603 	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
604 	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
605 
606 	if ((void *)phy == phy->hw->priv) {
607 		ret = ieee80211_register_hw(phy->hw);
608 		if (ret)
609 			return ret;
610 	}
611 
612 	set_bit(MT76_STATE_REGISTERED, &phy->state);
613 	phy->dev->phys[phy->band_idx] = phy;
614 
615 	return 0;
616 }
617 EXPORT_SYMBOL_GPL(mt76_register_phy);
618 
619 void mt76_unregister_phy(struct mt76_phy *phy)
620 {
621 	struct mt76_dev *dev = phy->dev;
622 
623 	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
624 		return;
625 
626 #if defined(CONFIG_MT76_LEDS)
627 	if (IS_ENABLED(CONFIG_MT76_LEDS))
628 		mt76_led_cleanup(phy);
629 #endif
630 	mt76_tx_status_check(dev, true);
631 	ieee80211_unregister_hw(phy->hw);
632 	dev->phys[phy->band_idx] = NULL;
633 }
634 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
635 
636 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
637 {
638 	bool is_qrx = mt76_queue_is_rx(dev, q);
639 	struct page_pool_params pp_params = {
640 		.order = 0,
641 		.flags = 0,
642 		.nid = NUMA_NO_NODE,
643 		.dev = dev->dma_dev,
644 	};
645 	int idx = is_qrx ? q - dev->q_rx : -1;
646 
647 	/* Allocate page_pools just for rx/wed_tx_free queues */
648 	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
649 		return 0;
650 
651 	switch (idx) {
652 	case MT_RXQ_MAIN:
653 	case MT_RXQ_BAND1:
654 	case MT_RXQ_BAND2:
655 		pp_params.pool_size = 256;
656 		break;
657 	default:
658 		pp_params.pool_size = 16;
659 		break;
660 	}
661 
662 	if (mt76_is_mmio(dev)) {
663 		/* rely on page_pool for DMA mapping */
664 		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
665 		pp_params.dma_dir = DMA_FROM_DEVICE;
666 		pp_params.max_len = PAGE_SIZE;
667 		pp_params.offset = 0;
668 		/* NAPI is available just for rx queues */
669 		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
670 			pp_params.napi = &dev->napi[idx];
671 	}
672 
673 	q->page_pool = page_pool_create(&pp_params);
674 	if (IS_ERR(q->page_pool)) {
675 		int err = PTR_ERR(q->page_pool);
676 
677 		q->page_pool = NULL;
678 		return err;
679 	}
680 
681 	return 0;
682 }
683 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
684 
685 struct mt76_dev *
686 mt76_alloc_device(struct device *pdev, unsigned int size,
687 		  const struct ieee80211_ops *ops,
688 		  const struct mt76_driver_ops *drv_ops)
689 {
690 	struct ieee80211_hw *hw;
691 	struct mt76_phy *phy;
692 	struct mt76_dev *dev;
693 	int i;
694 
695 	hw = ieee80211_alloc_hw(size, ops);
696 	if (!hw)
697 		return NULL;
698 
699 	dev = hw->priv;
700 	dev->hw = hw;
701 	dev->dev = pdev;
702 	dev->drv = drv_ops;
703 	dev->dma_dev = pdev;
704 
705 	phy = &dev->phy;
706 	phy->dev = dev;
707 	phy->hw = hw;
708 	phy->band_idx = MT_BAND0;
709 	dev->phys[phy->band_idx] = phy;
710 
711 	spin_lock_init(&dev->rx_lock);
712 	spin_lock_init(&dev->lock);
713 	spin_lock_init(&dev->cc_lock);
714 	spin_lock_init(&dev->status_lock);
715 	spin_lock_init(&dev->wed_lock);
716 	mutex_init(&dev->mutex);
717 	init_waitqueue_head(&dev->tx_wait);
718 
719 	skb_queue_head_init(&dev->mcu.res_q);
720 	init_waitqueue_head(&dev->mcu.wait);
721 	mutex_init(&dev->mcu.mutex);
722 	dev->tx_worker.fn = mt76_tx_worker;
723 
724 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
725 	hw->wiphy->interface_modes =
726 		BIT(NL80211_IFTYPE_STATION) |
727 		BIT(NL80211_IFTYPE_AP) |
728 #ifdef CONFIG_MAC80211_MESH
729 		BIT(NL80211_IFTYPE_MESH_POINT) |
730 #endif
731 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
732 		BIT(NL80211_IFTYPE_P2P_GO) |
733 		BIT(NL80211_IFTYPE_ADHOC);
734 
735 	spin_lock_init(&dev->token_lock);
736 	idr_init(&dev->token);
737 
738 	spin_lock_init(&dev->rx_token_lock);
739 	idr_init(&dev->rx_token);
740 
741 	INIT_LIST_HEAD(&dev->wcid_list);
742 	INIT_LIST_HEAD(&dev->sta_poll_list);
743 	spin_lock_init(&dev->sta_poll_lock);
744 
745 	INIT_LIST_HEAD(&dev->txwi_cache);
746 	INIT_LIST_HEAD(&dev->rxwi_cache);
747 	dev->token_size = dev->drv->token_size;
748 	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);
749 
750 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
751 		skb_queue_head_init(&dev->rx_skb[i]);
752 
753 	dev->wq = alloc_ordered_workqueue("mt76", 0);
754 	if (!dev->wq) {
755 		ieee80211_free_hw(hw);
756 		return NULL;
757 	}
758 
759 	return dev;
760 }
761 EXPORT_SYMBOL_GPL(mt76_alloc_device);
762 
763 int mt76_register_device(struct mt76_dev *dev, bool vht,
764 			 struct ieee80211_rate *rates, int n_rates)
765 {
766 	struct ieee80211_hw *hw = dev->hw;
767 	struct mt76_phy *phy = &dev->phy;
768 	int ret;
769 
770 	dev_set_drvdata(dev->dev, dev);
771 	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
772 	ret = mt76_phy_init(phy, hw);
773 	if (ret)
774 		return ret;
775 
776 	if (phy->cap.has_2ghz) {
777 		ret = mt76_init_sband_2g(phy, rates, n_rates);
778 		if (ret)
779 			return ret;
780 	}
781 
782 	if (phy->cap.has_5ghz) {
783 		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
784 		if (ret)
785 			return ret;
786 	}
787 
788 	if (phy->cap.has_6ghz) {
789 		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
790 		if (ret)
791 			return ret;
792 	}
793 
794 	wiphy_read_of_freq_limits(hw->wiphy);
795 	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
796 	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
797 	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
798 
799 #if defined(CONFIG_MT76_LEDS)
800 	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
801 		ret = mt76_led_init(phy);
802 		if (ret)
803 			return ret;
804 	}
805 #endif
806 
807 	ret = ieee80211_register_hw(hw);
808 	if (ret)
809 		return ret;
810 
811 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
812 	set_bit(MT76_STATE_REGISTERED, &phy->state);
813 	sched_set_fifo_low(dev->tx_worker.task);
814 
815 	return 0;
816 }
817 EXPORT_SYMBOL_GPL(mt76_register_device);
818 
819 void mt76_unregister_device(struct mt76_dev *dev)
820 {
821 #if defined(__linux__)
822 	struct ieee80211_hw *hw = dev->hw;
823 #endif
824 
825 	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
826 		return;
827 
828 #if defined(CONFIG_MT76_LEDS)
829 	if (IS_ENABLED(CONFIG_MT76_LEDS))
830 		mt76_led_cleanup(&dev->phy);
831 #endif
832 	mt76_tx_status_check(dev, true);
833 	mt76_wcid_cleanup(dev, &dev->global_wcid);
834 #if defined(__linux__)
835 	ieee80211_unregister_hw(hw);
836 #elif defined(__FreeBSD__)
837 	ieee80211_unregister_hw(dev->hw);
838 #endif
839 }
840 EXPORT_SYMBOL_GPL(mt76_unregister_device);
841 
842 void mt76_free_device(struct mt76_dev *dev)
843 {
844 	mt76_worker_teardown(&dev->tx_worker);
845 	if (dev->wq) {
846 		destroy_workqueue(dev->wq);
847 		dev->wq = NULL;
848 	}
849 	ieee80211_free_hw(dev->hw);
850 }
851 EXPORT_SYMBOL_GPL(mt76_free_device);
852 
853 static void mt76_reset_phy(struct mt76_phy *phy)
854 {
855 	if (!phy)
856 		return;
857 
858 	INIT_LIST_HEAD(&phy->tx_list);
859 }
860 
861 void mt76_reset_device(struct mt76_dev *dev)
862 {
863 	int i;
864 
865 	rcu_read_lock();
866 	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
867 		struct mt76_wcid *wcid;
868 
869 		wcid = rcu_dereference(dev->wcid[i]);
870 		if (!wcid)
871 			continue;
872 
873 		wcid->sta = 0;
874 		mt76_wcid_cleanup(dev, wcid);
875 		rcu_assign_pointer(dev->wcid[i], NULL);
876 	}
877 	rcu_read_unlock();
878 
879 	INIT_LIST_HEAD(&dev->wcid_list);
880 	INIT_LIST_HEAD(&dev->sta_poll_list);
881 	dev->vif_mask = 0;
882 	memset(dev->wcid_mask, 0, sizeof(dev->wcid_mask));
883 
884 	mt76_reset_phy(&dev->phy);
885 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
886 		mt76_reset_phy(dev->phys[i]);
887 }
888 EXPORT_SYMBOL_GPL(mt76_reset_device);
889 
890 struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
891 			      struct ieee80211_vif *vif)
892 {
893 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
894 	struct mt76_chanctx *ctx;
895 
896 	if (!hw->wiphy->n_radio)
897 		return hw->priv;
898 
899 	if (!mlink->ctx)
900 		return NULL;
901 
902 	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
903 	return ctx->phy;
904 }
905 EXPORT_SYMBOL_GPL(mt76_vif_phy);
906 
907 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
908 {
909 	struct sk_buff *skb = phy->rx_amsdu[q].head;
910 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
911 	struct mt76_dev *dev = phy->dev;
912 
913 	phy->rx_amsdu[q].head = NULL;
914 	phy->rx_amsdu[q].tail = NULL;
915 
916 	/*
917 	 * Validate that the A-MSDU has a proper first subframe.
918 	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
919 	 * flag of the QoS header gets flipped. In such cases, the first
920 	 * subframe has an LLC/SNAP header in the location of the destination
921 	 * address.
922 	 */
923 	if (skb_shinfo(skb)->frag_list) {
924 		int offset = 0;
925 
926 		if (!(status->flag & RX_FLAG_8023)) {
927 			offset = ieee80211_get_hdrlen_from_skb(skb);
928 
929 			if ((status->flag &
930 			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
931 			    RX_FLAG_DECRYPTED)
932 				offset += 8;
933 		}
934 
935 		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
936 			dev_kfree_skb(skb);
937 			return;
938 		}
939 	}
940 	__skb_queue_tail(&dev->rx_skb[q], skb);
941 }
942 
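/*
 * Collect consecutive RX frames that belong to the same hardware-decoded
 * A-MSDU (matched by sequence number) into the head skb's frag_list, and
 * release the aggregate once the last subframe or an unrelated frame
 * arrives.
 */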
943 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
944 				  struct sk_buff *skb)
945 {
946 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
947 
948 	if (phy->rx_amsdu[q].head &&
949 	    (!status->amsdu || status->first_amsdu ||
950 	     status->seqno != phy->rx_amsdu[q].seqno))
951 		mt76_rx_release_amsdu(phy, q);
952 
953 	if (!phy->rx_amsdu[q].head) {
954 		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
955 		phy->rx_amsdu[q].seqno = status->seqno;
956 		phy->rx_amsdu[q].head = skb;
957 	} else {
958 		*phy->rx_amsdu[q].tail = skb;
959 		phy->rx_amsdu[q].tail = &skb->next;
960 	}
961 
962 	if (!status->amsdu || status->last_amsdu)
963 		mt76_rx_release_amsdu(phy, q);
964 }
965 
966 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
967 {
968 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
969 	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
970 
971 	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
972 		dev_kfree_skb(skb);
973 		return;
974 	}
975 
976 #ifdef CONFIG_NL80211_TESTMODE
977 	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
978 		phy->test.rx_stats.packets[q]++;
979 		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
980 			phy->test.rx_stats.fcs_error[q]++;
981 	}
982 #endif
983 
984 	mt76_rx_release_burst(phy, q, skb);
985 }
986 EXPORT_SYMBOL_GPL(mt76_rx);
987 
988 bool mt76_has_tx_pending(struct mt76_phy *phy)
989 {
990 	struct mt76_queue *q;
991 	int i;
992 
993 	for (i = 0; i < __MT_TXQ_MAX; i++) {
994 		q = phy->q_tx[i];
995 		if (q && q->queued)
996 			return true;
997 	}
998 
999 	return false;
1000 }
1001 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
1002 
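/*
 * Look up the per-channel survey state by computing the channel's offset
 * within its band and indexing the matching mt76_sband::chan array.
 */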
1003 static struct mt76_channel_state *
1004 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
1005 {
1006 	struct mt76_sband *msband;
1007 	int idx;
1008 
1009 	if (c->band == NL80211_BAND_2GHZ)
1010 		msband = &phy->sband_2g;
1011 	else if (c->band == NL80211_BAND_6GHZ)
1012 		msband = &phy->sband_6g;
1013 	else
1014 		msband = &phy->sband_5g;
1015 
1016 	idx = c - &msband->sband.channels[0];
1017 	return &msband->chan[idx];
1018 }
1019 
1020 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
1021 {
1022 	struct mt76_channel_state *state = phy->chan_state;
1023 
1024 	state->cc_active += ktime_to_us(ktime_sub(time,
1025 						  phy->survey_time));
1026 	phy->survey_time = time;
1027 }
1028 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
1029 
1030 void mt76_update_survey(struct mt76_phy *phy)
1031 {
1032 	struct mt76_dev *dev = phy->dev;
1033 	ktime_t cur_time;
1034 
1035 	if (dev->drv->update_survey)
1036 		dev->drv->update_survey(phy);
1037 
1038 	cur_time = ktime_get_boottime();
1039 	mt76_update_survey_active_time(phy, cur_time);
1040 
1041 	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
1042 		struct mt76_channel_state *state = phy->chan_state;
1043 
1044 		spin_lock_bh(&dev->cc_lock);
1045 		state->cc_bss_rx += dev->cur_cc_bss_rx;
1046 		dev->cur_cc_bss_rx = 0;
1047 		spin_unlock_bh(&dev->cc_lock);
1048 	}
1049 }
1050 EXPORT_SYMBOL_GPL(mt76_update_survey);
1051 
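/*
 * Core channel switch path: quiesce TX (set MT76_RESET, stop the TX
 * worker and wait for pending frames), fold the elapsed time into the
 * survey statistics, invalidate the DFS state when frequency or width
 * changes, then call the driver's set_channel hook before re-enabling TX.
 */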
1052 int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
1053 		       bool offchannel)
1054 {
1055 	struct mt76_dev *dev = phy->dev;
1056 	int timeout = HZ / 5;
1057 	int ret;
1058 
1059 	set_bit(MT76_RESET, &phy->state);
1060 
1061 	mt76_worker_disable(&dev->tx_worker);
1062 	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
1063 	mt76_update_survey(phy);
1064 
1065 	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
1066 	    phy->chandef.width != chandef->width)
1067 		phy->dfs_state = MT_DFS_STATE_UNKNOWN;
1068 
1069 	phy->chandef = *chandef;
1070 	phy->chan_state = mt76_channel_state(phy, chandef->chan);
1071 	phy->offchannel = offchannel;
1072 
1073 	if (!offchannel)
1074 		phy->main_chandef = *chandef;
1075 
1076 	if (chandef->chan != phy->main_chandef.chan)
1077 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
1078 
1079 	ret = dev->drv->set_channel(phy);
1080 
1081 	clear_bit(MT76_RESET, &phy->state);
1082 	mt76_worker_enable(&dev->tx_worker);
1083 	mt76_worker_schedule(&dev->tx_worker);
1084 
1085 	return ret;
1086 }
1087 
1088 int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
1089 		     bool offchannel)
1090 {
1091 	struct mt76_dev *dev = phy->dev;
1092 	int ret;
1093 
1094 	cancel_delayed_work_sync(&phy->mac_work);
1095 
1096 	mutex_lock(&dev->mutex);
1097 	ret = __mt76_set_channel(phy, chandef, offchannel);
1098 	mutex_unlock(&dev->mutex);
1099 
1100 	return ret;
1101 }
1102 
1103 int mt76_update_channel(struct mt76_phy *phy)
1104 {
1105 	struct ieee80211_hw *hw = phy->hw;
1106 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1107 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1108 
1109 	phy->radar_enabled = hw->conf.radar_enabled;
1110 
1111 	return mt76_set_channel(phy, chandef, offchannel);
1112 }
1113 EXPORT_SYMBOL_GPL(mt76_update_channel);
1114 
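/*
 * Map a flat survey index onto the 2 GHz, 5 GHz and 6 GHz sbands in that
 * order, rewriting *idx to the channel offset within the returned band.
 * Returns NULL once the index runs past the last 6 GHz channel.
 */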
1115 static struct mt76_sband *
1116 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1117 {
1118 	if (*idx < phy->sband_2g.sband.n_channels)
1119 		return &phy->sband_2g;
1120 
1121 	*idx -= phy->sband_2g.sband.n_channels;
1122 	if (*idx < phy->sband_5g.sband.n_channels)
1123 		return &phy->sband_5g;
1124 
1125 	*idx -= phy->sband_5g.sband.n_channels;
1126 	if (*idx < phy->sband_6g.sband.n_channels)
1127 		return &phy->sband_6g;
1128 
1129 	*idx -= phy->sband_6g.sband.n_channels;
1130 	return NULL;
1131 }
1132 
1133 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
1134 		    struct survey_info *survey)
1135 {
1136 	struct mt76_phy *phy = hw->priv;
1137 	struct mt76_dev *dev = phy->dev;
1138 	struct mt76_sband *sband = NULL;
1139 	struct ieee80211_channel *chan;
1140 	struct mt76_channel_state *state;
1141 	int phy_idx = 0;
1142 	int ret = 0;
1143 
1144 	mutex_lock(&dev->mutex);
1145 
1146 	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
1147 		sband = NULL;
1148 		phy = dev->phys[phy_idx];
1149 		if (!phy || phy->hw != hw)
1150 			continue;
1151 
1152 		sband = mt76_get_survey_sband(phy, &idx);
1153 
1154 		if (idx == 0 && phy->dev->drv->update_survey)
1155 			mt76_update_survey(phy);
1156 
1157 		if (sband || !hw->wiphy->n_radio)
1158 			break;
1159 	}
1160 
1161 	if (!sband) {
1162 		ret = -ENOENT;
1163 		goto out;
1164 	}
1165 
1166 	chan = &sband->sband.channels[idx];
1167 	state = mt76_channel_state(phy, chan);
1168 
1169 	memset(survey, 0, sizeof(*survey));
1170 	survey->channel = chan;
1171 	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
1172 	survey->filled |= dev->drv->survey_flags;
1173 	if (state->noise)
1174 		survey->filled |= SURVEY_INFO_NOISE_DBM;
1175 
1176 	if (chan == phy->main_chandef.chan) {
1177 		survey->filled |= SURVEY_INFO_IN_USE;
1178 
1179 		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
1180 			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
1181 	}
1182 
1183 	survey->time_busy = div_u64(state->cc_busy, 1000);
1184 	survey->time_rx = div_u64(state->cc_rx, 1000);
1185 	survey->time = div_u64(state->cc_active, 1000);
1186 	survey->noise = state->noise;
1187 
1188 	spin_lock_bh(&dev->cc_lock);
1189 	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
1190 	survey->time_tx = div_u64(state->cc_tx, 1000);
1191 	spin_unlock_bh(&dev->cc_lock);
1192 
1193 out:
1194 	mutex_unlock(&dev->mutex);
1195 
1196 	return ret;
1197 }
1198 EXPORT_SYMBOL_GPL(mt76_get_survey);
1199 
1200 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1201 			 struct ieee80211_key_conf *key)
1202 {
1203 	struct ieee80211_key_seq seq;
1204 	int i;
1205 
1206 	wcid->rx_check_pn = false;
1207 
1208 	if (!key)
1209 		return;
1210 
1211 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1212 		return;
1213 
1214 	wcid->rx_check_pn = true;
1215 
1216 	/* data frame */
1217 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1218 		ieee80211_get_key_rx_seq(key, i, &seq);
1219 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1220 	}
1221 
1222 	/* robust management frame */
1223 	ieee80211_get_key_rx_seq(key, -1, &seq);
1224 	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1225 
1226 }
1227 EXPORT_SYMBOL(mt76_wcid_key_setup);
1228 
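/*
 * Combine per-chain RSSI values into a single signal estimate: take the
 * strongest reported chain and add up to 3 dB depending on how close the
 * weaker chains are (equal chains yield the full 3 dB combining gain).
 */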
1229 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1230 {
1231 	int signal = -128;
1232 	u8 chains;
1233 
1234 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1235 		int cur, diff;
1236 
1237 		cur = *chain_signal;
1238 		if (!(chains & BIT(0)) ||
1239 		    cur > 0)
1240 			continue;
1241 
1242 		if (cur > signal)
1243 			swap(cur, signal);
1244 
1245 		diff = signal - cur;
1246 		if (diff == 0)
1247 			signal += 3;
1248 		else if (diff <= 2)
1249 			signal += 2;
1250 		else if (diff <= 6)
1251 			signal += 1;
1252 	}
1253 
1254 	return signal;
1255 }
1256 EXPORT_SYMBOL(mt76_rx_signal);
1257 
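/*
 * Convert the driver-private mt76_rx_status stashed in skb->cb into the
 * ieee80211_rx_status layout mac80211 expects, derive the combined signal
 * from the per-chain values and resolve the owning station and hardware
 * for delivery.
 */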
1258 static void
1259 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1260 		struct ieee80211_hw **hw,
1261 		struct ieee80211_sta **sta)
1262 {
1263 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1264 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1265 	struct mt76_rx_status mstat;
1266 
1267 	mstat = *((struct mt76_rx_status *)skb->cb);
1268 	memset(status, 0, sizeof(*status));
1269 
1270 	status->flag = mstat.flag;
1271 	status->freq = mstat.freq;
1272 	status->enc_flags = mstat.enc_flags;
1273 	status->encoding = mstat.encoding;
1274 	status->bw = mstat.bw;
1275 	if (status->encoding == RX_ENC_EHT) {
1276 		status->eht.ru = mstat.eht.ru;
1277 		status->eht.gi = mstat.eht.gi;
1278 	} else {
1279 		status->he_ru = mstat.he_ru;
1280 		status->he_gi = mstat.he_gi;
1281 		status->he_dcm = mstat.he_dcm;
1282 	}
1283 	status->rate_idx = mstat.rate_idx;
1284 	status->nss = mstat.nss;
1285 	status->band = mstat.band;
1286 	status->signal = mstat.signal;
1287 	status->chains = mstat.chains;
1288 	status->ampdu_reference = mstat.ampdu_ref;
1289 	status->device_timestamp = mstat.timestamp;
1290 	status->mactime = mstat.timestamp;
1291 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1292 	if (status->signal <= -128)
1293 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1294 
1295 	if (ieee80211_is_beacon(hdr->frame_control) ||
1296 	    ieee80211_is_probe_resp(hdr->frame_control))
1297 		status->boottime_ns = ktime_get_boottime_ns();
1298 
1299 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1300 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1301 		     sizeof(mstat.chain_signal));
1302 	memcpy(status->chain_signal, mstat.chain_signal,
1303 	       sizeof(mstat.chain_signal));
1304 
1305 	if (mstat.wcid) {
1306 		status->link_valid = mstat.wcid->link_valid;
1307 		status->link_id = mstat.wcid->link_id;
1308 	}
1309 
1310 	*sta = wcid_to_sta(mstat.wcid);
1311 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1312 }
1313 
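/*
 * Software CCMP replay check: compare the received packet number against
 * the last value stored per TID (with an extra slot for robust management
 * frames) and demote replayed frames to monitor-only delivery. Only the
 * first fragment is checked here; mac80211 validates the rest.
 */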
1314 static void
1315 mt76_check_ccmp_pn(struct sk_buff *skb)
1316 {
1317 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1318 	struct mt76_wcid *wcid = status->wcid;
1319 	struct ieee80211_hdr *hdr;
1320 	int security_idx;
1321 	int ret;
1322 
1323 	if (!(status->flag & RX_FLAG_DECRYPTED))
1324 		return;
1325 
1326 	if (status->flag & RX_FLAG_ONLY_MONITOR)
1327 		return;
1328 
1329 	if (!wcid || !wcid->rx_check_pn)
1330 		return;
1331 
1332 	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1333 	if (status->flag & RX_FLAG_8023)
1334 		goto skip_hdr_check;
1335 
1336 	hdr = mt76_skb_get_hdr(skb);
1337 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1338 		/*
1339 		 * Validate the first fragment both here and in mac80211
1340 		 * Validate the first fragment both here and in mac80211.
1341 		 */
1342 		if (ieee80211_is_frag(hdr) &&
1343 		    !ieee80211_is_first_frag(hdr->frame_control))
1344 			return;
1345 	}
1346 
1347 	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1348 	 *
1349 	 * the recipient shall maintain a single replay counter for received
1350 	 * individually addressed robust Management frames that are received
1351 	 * with the To DS subfield equal to 0, [...]
1352 	 */
1353 	if (ieee80211_is_mgmt(hdr->frame_control) &&
1354 	    !ieee80211_has_tods(hdr->frame_control))
1355 		security_idx = IEEE80211_NUM_TIDS;
1356 
1357 skip_hdr_check:
1358 	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
1359 	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1360 		     sizeof(status->iv));
1361 	if (ret <= 0) {
1362 		status->flag |= RX_FLAG_ONLY_MONITOR;
1363 		return;
1364 	}
1365 
1366 	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1367 
1368 	if (status->flag & RX_FLAG_IV_STRIPPED)
1369 		status->flag |= RX_FLAG_PN_VALIDATED;
1370 }
1371 
1372 static void
1373 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1374 		    int len)
1375 {
1376 	struct mt76_wcid *wcid = status->wcid;
1377 	struct ieee80211_rx_status info = {
1378 		.enc_flags = status->enc_flags,
1379 		.rate_idx = status->rate_idx,
1380 		.encoding = status->encoding,
1381 		.band = status->band,
1382 		.nss = status->nss,
1383 		.bw = status->bw,
1384 	};
1385 	struct ieee80211_sta *sta;
1386 	u32 airtime;
1387 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1388 
1389 	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1390 	spin_lock(&dev->cc_lock);
1391 	dev->cur_cc_bss_rx += airtime;
1392 	spin_unlock(&dev->cc_lock);
1393 
1394 	if (!wcid || !wcid->sta)
1395 		return;
1396 
1397 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1398 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1399 }
1400 
1401 static void
1402 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1403 {
1404 	struct mt76_wcid *wcid;
1405 	int wcid_idx;
1406 
1407 	if (!dev->rx_ampdu_len)
1408 		return;
1409 
1410 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1411 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1412 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1413 	else
1414 		wcid = NULL;
1415 	dev->rx_ampdu_status.wcid = wcid;
1416 
1417 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1418 
1419 	dev->rx_ampdu_len = 0;
1420 	dev->rx_ampdu_ref = 0;
1421 }
1422 
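/*
 * Software RX airtime accounting (MT_DRV_SW_RX_AIRTIME): frames belonging
 * to one A-MPDU are accumulated by length and reported once per aggregate
 * via mt76_airtime_flush_ampdu(); all other frames are reported
 * immediately against the current channel and, if known, the station.
 */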
1423 static void
1424 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
1425 {
1426 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1427 	struct mt76_wcid *wcid = status->wcid;
1428 
1429 	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1430 		return;
1431 
1432 	if (!wcid || !wcid->sta) {
1433 		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1434 
1435 		if (status->flag & RX_FLAG_8023)
1436 			return;
1437 
1438 		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1439 			return;
1440 
1441 		wcid = NULL;
1442 	}
1443 
1444 	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1445 	    status->ampdu_ref != dev->rx_ampdu_ref)
1446 		mt76_airtime_flush_ampdu(dev);
1447 
1448 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1449 		if (!dev->rx_ampdu_len ||
1450 		    status->ampdu_ref != dev->rx_ampdu_ref) {
1451 			dev->rx_ampdu_status = *status;
1452 			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1453 			dev->rx_ampdu_ref = status->ampdu_ref;
1454 		}
1455 
1456 		dev->rx_ampdu_len += skb->len;
1457 		return;
1458 	}
1459 
1460 	mt76_airtime_report(dev, status, skb->len);
1461 }
1462 
1463 static void
1464 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
1465 {
1466 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1467 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1468 	struct ieee80211_sta *sta;
1469 	struct ieee80211_hw *hw;
1470 	struct mt76_wcid *wcid = status->wcid;
1471 	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1472 	bool ps;
1473 
1474 	hw = mt76_phy_hw(dev, status->phy_idx);
1475 	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1476 	    !(status->flag & RX_FLAG_8023)) {
1477 		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1478 		if (sta)
1479 			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1480 	}
1481 
1482 	mt76_airtime_check(dev, skb);
1483 
1484 	if (!wcid || !wcid->sta)
1485 		return;
1486 
1487 	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1488 
1489 	if (status->signal <= 0)
1490 		ewma_signal_add(&wcid->rssi, -status->signal);
1491 
1492 	wcid->inactive_count = 0;
1493 
1494 	if (status->flag & RX_FLAG_8023)
1495 		return;
1496 
1497 	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1498 		return;
1499 
1500 	if (ieee80211_is_pspoll(hdr->frame_control)) {
1501 		ieee80211_sta_pspoll(sta);
1502 		return;
1503 	}
1504 
1505 	if (ieee80211_has_morefrags(hdr->frame_control) ||
1506 	    !(ieee80211_is_mgmt(hdr->frame_control) ||
1507 	      ieee80211_is_data(hdr->frame_control)))
1508 		return;
1509 
1510 	ps = ieee80211_has_pm(hdr->frame_control);
1511 
1512 	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1513 		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
1514 		ieee80211_sta_uapsd_trigger(sta, tidno);
1515 
1516 	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1517 		return;
1518 
1519 	if (ps)
1520 		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1521 
1522 	if (dev->drv->sta_ps)
1523 		dev->drv->sta_ps(dev, sta, ps);
1524 
1525 	if (!ps)
1526 		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1527 
1528 	ieee80211_sta_ps_transition(sta, ps);
1529 }
1530 
1531 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1532 		      struct napi_struct *napi)
1533 {
1534 	struct ieee80211_sta *sta;
1535 	struct ieee80211_hw *hw;
1536 	struct sk_buff *skb, *tmp;
1537 #if defined(__linux__)
1538 	LIST_HEAD(list);
1539 #elif defined(__FreeBSD__)
1540 	LINUX_LIST_HEAD(list);
1541 #endif
1542 
1543 	spin_lock(&dev->rx_lock);
1544 	while ((skb = __skb_dequeue(frames)) != NULL) {
1545 		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1546 
1547 		mt76_check_ccmp_pn(skb);
1548 		skb_shinfo(skb)->frag_list = NULL;
1549 		mt76_rx_convert(dev, skb, &hw, &sta);
1550 		ieee80211_rx_list(hw, sta, skb, &list);
1551 
1552 		/* subsequent amsdu frames */
1553 		while (nskb) {
1554 			skb = nskb;
1555 			nskb = nskb->next;
1556 			skb->next = NULL;
1557 
1558 			mt76_rx_convert(dev, skb, &hw, &sta);
1559 			ieee80211_rx_list(hw, sta, skb, &list);
1560 		}
1561 	}
1562 	spin_unlock(&dev->rx_lock);
1563 
1564 	if (!napi) {
1565 		netif_receive_skb_list(&list);
1566 		return;
1567 	}
1568 
1569 	list_for_each_entry_safe(skb, tmp, &list, list) {
1570 		skb_list_del_init(skb);
1571 		napi_gro_receive(napi, skb);
1572 	}
1573 }
1574 
1575 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1576 			   struct napi_struct *napi)
1577 {
1578 	struct sk_buff_head frames;
1579 	struct sk_buff *skb;
1580 
1581 	__skb_queue_head_init(&frames);
1582 
1583 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1584 		mt76_check_sta(dev, skb);
1585 		if (mtk_wed_device_active(&dev->mmio.wed))
1586 			__skb_queue_tail(&frames, skb);
1587 		else
1588 			mt76_rx_aggr_reorder(skb, &frames);
1589 	}
1590 
1591 	mt76_rx_complete(dev, &frames, napi);
1592 }
1593 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1594 
1595 static int
1596 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1597 	     struct ieee80211_sta *sta)
1598 {
1599 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1600 	struct mt76_dev *dev = phy->dev;
1601 	int ret;
1602 	int i;
1603 
1604 	mutex_lock(&dev->mutex);
1605 
1606 	ret = dev->drv->sta_add(dev, vif, sta);
1607 	if (ret)
1608 		goto out;
1609 
1610 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1611 		struct mt76_txq *mtxq;
1612 
1613 		if (!sta->txq[i])
1614 			continue;
1615 
1616 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1617 		mtxq->wcid = wcid->idx;
1618 	}
1619 
1620 	ewma_signal_init(&wcid->rssi);
1621 	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1622 	phy->num_sta++;
1623 
1624 	mt76_wcid_init(wcid, phy->band_idx);
1625 out:
1626 	mutex_unlock(&dev->mutex);
1627 
1628 	return ret;
1629 }
1630 
1631 void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
1632 		       struct ieee80211_sta *sta)
1633 {
1634 	struct mt76_dev *dev = phy->dev;
1635 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1636 	int i, idx = wcid->idx;
1637 
1638 	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1639 		mt76_rx_aggr_stop(dev, wcid, i);
1640 
1641 	if (dev->drv->sta_remove)
1642 		dev->drv->sta_remove(dev, vif, sta);
1643 
1644 	mt76_wcid_cleanup(dev, wcid);
1645 
1646 	mt76_wcid_mask_clear(dev->wcid_mask, idx);
1647 	phy->num_sta--;
1648 }
1649 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1650 
1651 static void
1652 mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
1653 		struct ieee80211_sta *sta)
1654 {
1655 	struct mt76_dev *dev = phy->dev;
1656 
1657 	mutex_lock(&dev->mutex);
1658 	__mt76_sta_remove(phy, vif, sta);
1659 	mutex_unlock(&dev->mutex);
1660 }
1661 
1662 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1663 		   struct ieee80211_sta *sta,
1664 		   enum ieee80211_sta_state old_state,
1665 		   enum ieee80211_sta_state new_state)
1666 {
1667 	struct mt76_phy *phy = hw->priv;
1668 	struct mt76_dev *dev = phy->dev;
1669 	enum mt76_sta_event ev;
1670 
1671 	phy = mt76_vif_phy(hw, vif);
1672 	if (!phy)
1673 		return -EINVAL;
1674 
1675 	if (old_state == IEEE80211_STA_NOTEXIST &&
1676 	    new_state == IEEE80211_STA_NONE)
1677 		return mt76_sta_add(phy, vif, sta);
1678 
1679 	if (old_state == IEEE80211_STA_NONE &&
1680 	    new_state == IEEE80211_STA_NOTEXIST)
1681 		mt76_sta_remove(phy, vif, sta);
1682 
1683 	if (!dev->drv->sta_event)
1684 		return 0;
1685 
1686 	if (old_state == IEEE80211_STA_AUTH &&
1687 	    new_state == IEEE80211_STA_ASSOC)
1688 		ev = MT76_STA_EVENT_ASSOC;
1689 	else if (old_state == IEEE80211_STA_ASSOC &&
1690 		 new_state == IEEE80211_STA_AUTHORIZED)
1691 		ev = MT76_STA_EVENT_AUTHORIZE;
1692 	else if (old_state == IEEE80211_STA_ASSOC &&
1693 		 new_state == IEEE80211_STA_AUTH)
1694 		ev = MT76_STA_EVENT_DISASSOC;
1695 	else
1696 		return 0;
1697 
1698 	return dev->drv->sta_event(dev, vif, sta, ev);
1699 }
1700 EXPORT_SYMBOL_GPL(mt76_sta_state);
1701 
1702 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1703 			     struct ieee80211_sta *sta)
1704 {
1705 	struct mt76_phy *phy = hw->priv;
1706 	struct mt76_dev *dev = phy->dev;
1707 	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1708 
1709 	mutex_lock(&dev->mutex);
1710 	spin_lock_bh(&dev->status_lock);
1711 	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1712 	spin_unlock_bh(&dev->status_lock);
1713 	mutex_unlock(&dev->mutex);
1714 }
1715 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1716 
1717 void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
1718 {
1719 	wcid->hw_key_idx = -1;
1720 	wcid->phy_idx = band_idx;
1721 
1722 	INIT_LIST_HEAD(&wcid->tx_list);
1723 	skb_queue_head_init(&wcid->tx_pending);
1724 	skb_queue_head_init(&wcid->tx_offchannel);
1725 
1726 	INIT_LIST_HEAD(&wcid->list);
1727 	idr_init(&wcid->pktid);
1728 
1729 	INIT_LIST_HEAD(&wcid->poll_list);
1730 }
1731 EXPORT_SYMBOL_GPL(mt76_wcid_init);
1732 
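/*
 * Flush everything still queued for a wcid before it goes away: reclaim
 * frames waiting for TX status, splice the pending and offchannel TX
 * queues out under the phy tx_lock, and free the skbs back to mac80211.
 */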
1733 void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
1734 {
1735 	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
1736 	struct ieee80211_hw *hw;
1737 	struct sk_buff_head list;
1738 	struct sk_buff *skb;
1739 
1740 	mt76_tx_status_lock(dev, &list);
1741 	mt76_tx_status_skb_get(dev, wcid, -1, &list);
1742 	mt76_tx_status_unlock(dev, &list);
1743 
1744 	idr_destroy(&wcid->pktid);
1745 
1746 	spin_lock_bh(&phy->tx_lock);
1747 
1748 	if (!list_empty(&wcid->tx_list))
1749 		list_del_init(&wcid->tx_list);
1750 
1751 	spin_lock(&wcid->tx_pending.lock);
1752 	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
1753 	spin_unlock(&wcid->tx_pending.lock);
1754 
1755 	spin_lock(&wcid->tx_offchannel.lock);
1756 	skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
1757 	spin_unlock(&wcid->tx_offchannel.lock);
1758 
1759 	spin_unlock_bh(&phy->tx_lock);
1760 
1761 	while ((skb = __skb_dequeue(&list)) != NULL) {
1762 		hw = mt76_tx_status_get_hw(dev, skb);
1763 		ieee80211_free_txskb(hw, skb);
1764 	}
1765 }
1766 EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1767 
1768 void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
1769 {
1770 	if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
1771 		return;
1772 
1773 	spin_lock_bh(&dev->sta_poll_lock);
1774 	if (list_empty(&wcid->poll_list))
1775 		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
1776 	spin_unlock_bh(&dev->sta_poll_lock);
1777 }
1778 EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1779 
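/*
 * Scale a per-vif TX power limit from dBm to the half-dB resolution used
 * by the SAR tables, clamp it against any configured SAR limit for the
 * current channel and subtract the multi-chain path delta.
 */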
1780 s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
1781 {
1782 	int n_chains = hweight16(phy->chainmask);
1783 
1784 	txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
1785 	txpower -= mt76_tx_power_path_delta(n_chains);
1786 
1787 	return txpower;
1788 }
1789 EXPORT_SYMBOL_GPL(mt76_get_power_bound);
1790 
1791 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1792 		     unsigned int link_id, int *dbm)
1793 {
1794 	struct mt76_phy *phy = mt76_vif_phy(hw, vif);
1795 	int n_chains, delta;
1796 
1797 	if (!phy)
1798 		return -EINVAL;
1799 
1800 	n_chains = hweight16(phy->chainmask);
1801 	delta = mt76_tx_power_path_delta(n_chains);
1802 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1803 
1804 	return 0;
1805 }
1806 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1807 
1808 int mt76_init_sar_power(struct ieee80211_hw *hw,
1809 			const struct cfg80211_sar_specs *sar)
1810 {
1811 	struct mt76_phy *phy = hw->priv;
1812 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1813 	int i;
1814 
1815 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1816 		return -EINVAL;
1817 
1818 	for (i = 0; i < sar->num_sub_specs; i++) {
1819 		u32 index = sar->sub_specs[i].freq_range_index;
1820 		/* SAR specifies the power limitation in 0.25 dBm units */
1821 		s32 power = sar->sub_specs[i].power >> 1;
1822 
1823 		if (power > 127 || power < -127)
1824 			power = 127;
1825 
1826 		phy->frp[index].range = &capa->freq_ranges[index];
1827 		phy->frp[index].power = power;
1828 	}
1829 
1830 	return 0;
1831 }
1832 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1833 
1834 int mt76_get_sar_power(struct mt76_phy *phy,
1835 		       struct ieee80211_channel *chan,
1836 		       int power)
1837 {
1838 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1839 	int freq, i;
1840 
1841 	if (!capa || !phy->frp)
1842 		return power;
1843 
1844 	if (power > 127 || power < -127)
1845 		power = 127;
1846 
1847 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1848 	for (i = 0; i < capa->num_freq_ranges; i++) {
1849 		if (phy->frp[i].range &&
1850 		    freq >= phy->frp[i].range->start_freq &&
1851 		    freq < phy->frp[i].range->end_freq) {
1852 			power = min_t(int, phy->frp[i].power, power);
1853 			break;
1854 		}
1855 	}
1856 
1857 	return power;
1858 }
1859 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1860 
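/*
 * Channel switch announcement helpers: mt76_csa_check() walks all
 * active interfaces and records whether any beacon countdown has
 * completed; mt76_csa_finish() then reports CSA completion to mac80211
 * and clears the flag.
 */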
1861 static void
1862 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1863 {
1864 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1865 		ieee80211_csa_finish(vif, 0);
1866 }
1867 
1868 void mt76_csa_finish(struct mt76_dev *dev)
1869 {
1870 	if (!dev->csa_complete)
1871 		return;
1872 
1873 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1874 		IEEE80211_IFACE_ITER_RESUME_ALL,
1875 		__mt76_csa_finish, dev);
1876 
1877 	dev->csa_complete = 0;
1878 }
1879 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1880 
1881 static void
1882 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1883 {
1884 	struct mt76_dev *dev = priv;
1885 
1886 	if (!vif->bss_conf.csa_active)
1887 		return;
1888 
1889 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1890 }
1891 
1892 void mt76_csa_check(struct mt76_dev *dev)
1893 {
1894 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1895 		IEEE80211_IFACE_ITER_RESUME_ALL,
1896 		__mt76_csa_check, dev);
1897 }
1898 EXPORT_SYMBOL_GPL(mt76_csa_check);
1899 
1900 int
1901 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1902 {
1903 	return 0;
1904 }
1905 EXPORT_SYMBOL_GPL(mt76_set_tim);
1906 
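/*
 * Rebuild the 8-byte CCMP header in front of a received frame from the
 * packet number saved in the rx status.  Used when the hardware strips
 * the IV but mac80211 still needs it, e.g. for replay detection; the
 * byte layout follows the IEEE 802.11 CCMP header (PN0, PN1, reserved,
 * key id with the Ext IV bit, PN2..PN5), after which the IV-stripped
 * flag is cleared.
 */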
1907 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1908 {
1909 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1910 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1911 	u8 *hdr, *pn = status->iv;
1912 
1913 	__skb_push(skb, 8);
1914 	memmove(skb->data, skb->data + 8, hdr_len);
1915 	hdr = skb->data + hdr_len;
1916 
1917 	hdr[0] = pn[5];
1918 	hdr[1] = pn[4];
1919 	hdr[2] = 0;
1920 	hdr[3] = 0x20 | (key_id << 6);
1921 	hdr[4] = pn[3];
1922 	hdr[5] = pn[2];
1923 	hdr[6] = pn[1];
1924 	hdr[7] = pn[0];
1925 
1926 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1927 }
1928 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1929 
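/*
 * Translate a hardware rate index into an index into the sband bitrate
 * table.  CCK rates are only valid on 2 GHz (the short preamble bit is
 * masked off); for OFDM on 2 GHz the lookup starts after the four CCK
 * entries.  Falls back to index 0 when no entry matches.
 */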
1930 int mt76_get_rate(struct mt76_dev *dev,
1931 		  struct ieee80211_supported_band *sband,
1932 		  int idx, bool cck)
1933 {
1934 	bool is_2g = sband->band == NL80211_BAND_2GHZ;
1935 	int i, offset = 0, len = sband->n_bitrates;
1936 
1937 	if (cck) {
1938 		if (!is_2g)
1939 			return 0;
1940 
1941 		idx &= ~BIT(2); /* short preamble */
1942 	} else if (is_2g) {
1943 		offset = 4;
1944 	}
1945 
1946 	for (i = offset; i < len; i++) {
1947 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1948 			return i;
1949 	}
1950 
1951 	return 0;
1952 }
1953 EXPORT_SYMBOL_GPL(mt76_get_rate);
1954 
1955 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1956 		  const u8 *mac)
1957 {
1958 	struct mt76_phy *phy = hw->priv;
1959 
1960 	set_bit(MT76_SCANNING, &phy->state);
1961 }
1962 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1963 
1964 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1965 {
1966 	struct mt76_phy *phy = hw->priv;
1967 
1968 	clear_bit(MT76_SCANNING, &phy->state);
1969 }
1970 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1971 
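/*
 * Report the available antennas as the combined chainmask of every phy
 * registered on this ieee80211_hw; the rx and tx masks are identical.
 */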
1972 int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
1973 		     u32 *rx_ant)
1974 {
1975 	struct mt76_phy *phy = hw->priv;
1976 	struct mt76_dev *dev = phy->dev;
1977 	int i;
1978 
1979 	mutex_lock(&dev->mutex);
1980 	*tx_ant = 0;
1981 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
1982 		if (dev->phys[i] && dev->phys[i]->hw == hw)
1983 			*tx_ant |= dev->phys[i]->chainmask;
1984 	*rx_ant = *tx_ant;
1985 	mutex_unlock(&dev->mutex);
1986 
1987 	return 0;
1988 }
1989 EXPORT_SYMBOL_GPL(mt76_get_antenna);
1990 
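/*
 * Allocate a hardware queue structure and let the bus specific
 * queue_ops set up its descriptor ring.  Returns an ERR_PTR() on
 * allocation or ring setup failure.
 */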
1991 struct mt76_queue *
1992 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1993 		int ring_base, void *wed, u32 flags)
1994 {
1995 	struct mt76_queue *hwq;
1996 	int err;
1997 
1998 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1999 	if (!hwq)
2000 		return ERR_PTR(-ENOMEM);
2001 
2002 	hwq->flags = flags;
2003 	hwq->wed = wed;
2004 
2005 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
2006 	if (err < 0)
2007 		return ERR_PTR(err);
2008 
2009 	return hwq;
2010 }
2011 EXPORT_SYMBOL_GPL(mt76_init_queue);
2012 
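/*
 * Accumulate one station's tx statistics (per PHY mode, bandwidth, MCS
 * and NSS counters) into the ethtool stats array.  The eht flag adds
 * the EHT specific modes, an extra bandwidth bucket and two extra MCS
 * entries.
 */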
2013 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
2014 			 struct mt76_sta_stats *stats, bool eht)
2015 {
2016 	int i, ei = wi->initial_stat_idx;
2017 	u64 *data = wi->data;
2018 
2019 	wi->sta_count++;
2020 
2021 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
2022 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
2023 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
2024 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
2025 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
2026 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
2027 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
2028 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
2029 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
2030 	if (eht) {
2031 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
2032 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
2033 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
2034 	}
2035 
2036 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
2037 		data[ei++] += stats->tx_bw[i];
2038 
2039 	for (i = 0; i < (eht ? 14 : 12); i++)
2040 		data[ei++] += stats->tx_mcs[i];
2041 
2042 	for (i = 0; i < 4; i++)
2043 		data[ei++] += stats->tx_nss[i];
2044 
2045 	wi->worker_stat_count = ei - wi->initial_stat_idx;
2046 }
2047 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
2048 
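/*
 * Append page pool statistics for all rx queues to the ethtool data
 * array; compiled out when CONFIG_PAGE_POOL_STATS is not set.
 */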
2049 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
2050 {
2051 #ifdef CONFIG_PAGE_POOL_STATS
2052 	struct page_pool_stats stats = {};
2053 	int i;
2054 
2055 	mt76_for_each_q_rx(dev, i)
2056 		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
2057 
2058 	page_pool_ethtool_stats_get(data, &stats);
2059 	*index += page_pool_ethtool_stats_get_count();
2060 #endif
2061 }
2062 EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
2063 
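/*
 * Derive the current DFS state: disabled while scanning or without a
 * DFS region, active when radar detection is enabled (or when monitoring
 * a radar channel), and CAC while beaconing is not yet permitted on the
 * configured channel.
 */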
2064 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
2065 {
2066 	struct ieee80211_hw *hw = phy->hw;
2067 	struct mt76_dev *dev = phy->dev;
2068 
2069 	if (dev->region == NL80211_DFS_UNSET ||
2070 	    test_bit(MT76_SCANNING, &phy->state))
2071 		return MT_DFS_STATE_DISABLED;
2072 
2073 	if (!phy->radar_enabled) {
2074 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
2075 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
2076 			return MT_DFS_STATE_ACTIVE;
2077 
2078 		return MT_DFS_STATE_DISABLED;
2079 	}
2080 
2081 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
2082 		return MT_DFS_STATE_CAC;
2083 
2084 	return MT_DFS_STATE_ACTIVE;
2085 }
2086 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
2087 
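/*
 * Tear down per-interface state when a vif is removed: clear the
 * primary link pointer, abort any pending hw scan and cancel a
 * remain-on-channel request still tied to this interface.
 */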
2088 void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
2089 {
2090 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
2091 	struct mt76_vif_data *mvif = mlink->mvif;
2092 
2093 	rcu_assign_pointer(mvif->link[0], NULL);
2094 	mt76_abort_scan(dev);
2095 	if (mvif->roc_phy)
2096 		mt76_abort_roc(mvif->roc_phy);
2097 }
2098 EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2099