// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"

#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};

static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};

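/*
 * LED blink intervals keyed by throughput. Per mac80211's
 * ieee80211_tpt_blink, .throughput is in Kbit/s (so 100 * 1024 is roughly
 * 100 Mbit/s) and .blink_time is the blink period in milliseconds, i.e.
 * the LED blinks faster as throughput rises.
 */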
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};

struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);

static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};

static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};

static int mt76_led_init(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct device_node *np = dev->dev->of_node;

	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return 0;

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_device_is_available(np)) {
			of_node_put(np);
			dev_info(dev->dev,
				"led registration was explicitly disabled by dts\n");
			return 0;
		}

		if (phy == &dev->phy) {
			int led_pin;

			if (!of_property_read_u32(np, "led-sources", &led_pin))
				phy->leds.pin = led_pin;

			phy->leds.al =
				of_property_read_bool(np, "led-active-low");
		}

		of_node_put(np);
	}

	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
		 wiphy_name(hw->wiphy));

	phy->leds.cdev.name = phy->leds.name;
	phy->leds.cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	dev_info(dev->dev,
		"registering led '%s'\n", phy->leds.name);

	return led_classdev_register(dev->dev, &phy->leds.cdev);
}
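
/*
 * Hedged sketch of the device-tree fragment mt76_led_init() parses; the
 * "led" node name and both property names come from the code above, while
 * the values below are made-up examples:
 *
 *	led {
 *		led-sources = <2>;
 *		led-active-low;
 *	};
 *
 * A node carrying status = "disabled" makes of_device_is_available() return
 * false, which skips LED registration entirely.
 */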

static void mt76_led_cleanup(struct mt76_phy *phy)
{
	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return;

	led_classdev_unregister(&phy->leds.cdev);
}

static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
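
/*
 * Worked example for the VHT MCS map built above (an illustration, not code
 * from the driver): with antenna_mask = 0x3, nstream is 2, so streams 0-1
 * get IEEE80211_VHT_MCS_SUPPORT_0_9 (binary 10) and streams 2-7 get
 * IEEE80211_VHT_MCS_NOT_SUPPORTED (binary 11), giving mcs_map = 0xfffa.
 */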

void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;

	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz), rates,
			       n_rates, true, false);
}

static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;

	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz), rates,
			       n_rates, true, vht);
}

static int
mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;

	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
			       ARRAY_SIZE(mt76_channels_6ghz), rates,
			       n_rates, false, false);
}

static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
					NL80211_CHAN_HT20);
		phy->chan_state = &msband->chan[0];
		phy->dev->band_phys[band] = phy;
		return;
	}

	sband->n_channels = 0;
	if (phy->hw->wiphy->bands[band] == sband)
		phy->hw->wiphy->bands[band] = NULL;
}

static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	if (!wiphy->available_antennas_tx)
		wiphy->available_antennas_tx = phy->antenna_mask;
	if (!wiphy->available_antennas_rx)
		wiphy->available_antennas_rx = phy->antenna_mask;

	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}

struct mt76_phy *
mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
		     u8 band_idx)
{
	struct ieee80211_hw *hw = dev->phy.hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
	if (!phy)
		return NULL;

	phy->dev = dev;
	phy->hw = hw;
	phy->priv = (void *)phy + phy_size;
	phy->band_idx = band_idx;

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);

struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
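
/*
 * Memory-layout note for the two allocators above: both round
 * sizeof(struct mt76_phy) up to an 8-byte boundary and place the caller's
 * private area (size bytes) directly behind it in a single allocation,
 * which is what phy->priv points at:
 *
 *	[ struct mt76_phy | pad to 8 bytes | driver private data ]
 */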

int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);

void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);

int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);

struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	dev->dma_dev = pdev;

	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
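
/*
 * Hedged usage sketch (not part of this file): a typical driver probe path
 * combines the allocator above with the registration helper below roughly
 * as follows; pdev, my_ops, my_drv_ops, my_rates and struct my_dev_priv
 * are placeholder names:
 *
 *	struct mt76_dev *dev;
 *	int ret;
 *
 *	dev = mt76_alloc_device(&pdev->dev, sizeof(struct my_dev_priv),
 *				&my_ops, &my_drv_ops);
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	ret = mt76_register_device(dev, true, my_rates,
 *				   ARRAY_SIZE(my_rates));
 *	if (ret)
 *		mt76_free_device(dev);
 *
 * Teardown mirrors this with mt76_unregister_device() followed by
 * mt76_free_device().
 */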

int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);

void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);

static void mt76_reset_phy(struct mt76_phy *phy)
{
	if (!phy)
		return;

	INIT_LIST_HEAD(&phy->tx_list);
}

void mt76_reset_device(struct mt76_dev *dev)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[i]);
		if (!wcid)
			continue;

		wcid->sta = 0;
		mt76_wcid_cleanup(dev, wcid);
		rcu_assign_pointer(dev->wcid[i], NULL);
	}
	rcu_read_unlock();

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	dev->vif_mask = 0;
	memset(dev->wcid_mask, 0, sizeof(dev->wcid_mask));

	mt76_reset_phy(&dev->phy);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
		mt76_reset_phy(dev->phys[i]);
}
EXPORT_SYMBOL_GPL(mt76_reset_device);

struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_chanctx *ctx;

	if (!hw->wiphy->n_radio)
		return hw->priv;

	if (!mlink->ctx)
		return NULL;

	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
	return ctx->phy;
}
EXPORT_SYMBOL_GPL(mt76_vif_phy);

static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate that the A-MSDU has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has an LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}

static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);

bool mt76_has_tx_pending(struct mt76_phy *phy)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		q = phy->q_tx[i];
		if (q && q->queued)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);

static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &phy->sband_2g;
	else if (c->band == NL80211_BAND_6GHZ)
		msband = &phy->sband_6g;
	else
		msband = &phy->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;

	state->cc_active += ktime_to_us(ktime_sub(time,
						  phy->survey_time));
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);

void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);

int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	set_bit(MT76_RESET, &phy->state);

	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chandef = *chandef;

	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}

int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int ret;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	ret = __mt76_set_channel(phy, chandef, offchannel);
	mutex_unlock(&dev->mutex);

	return ret;
}

int mt76_update_channel(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;

	phy->radar_enabled = hw->conf.radar_enabled;

	return mt76_set_channel(phy, chandef, offchannel);
}
EXPORT_SYMBOL_GPL(mt76_update_channel);

static struct mt76_sband *
mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
{
	if (*idx < phy->sband_2g.sband.n_channels)
		return &phy->sband_2g;

	*idx -= phy->sband_2g.sband.n_channels;
	if (*idx < phy->sband_5g.sband.n_channels)
		return &phy->sband_5g;

	*idx -= phy->sband_5g.sband.n_channels;
	if (*idx < phy->sband_6g.sband.n_channels)
		return &phy->sband_6g;

	*idx -= phy->sband_6g.sband.n_channels;
	return NULL;
}
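
/*
 * Note on the helper above: it consumes *idx band by band in
 * 2 GHz -> 5 GHz -> 6 GHz order, subtracting each band's channel count as
 * it goes, so when a band is returned, *idx has been rewritten to index a
 * channel inside that band; NULL means the index walked past all bands.
 */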

int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband = NULL;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int phy_idx = 0;
	int ret = 0;

	mutex_lock(&dev->mutex);

	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
		sband = NULL;
		phy = dev->phys[phy_idx];
		if (!phy || phy->hw != hw)
			continue;

		sband = mt76_get_survey_sband(phy, &idx);

		if (idx == 0 && phy->dev->drv->update_survey)
			mt76_update_survey(phy);

		if (sband || !hw->wiphy->n_radio)
			break;
	}

	if (!sband) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
EXPORT_SYMBOL(mt76_wcid_key_setup);

int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int signal = -128;
	u8 chains;

	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
		int cur, diff;

		cur = *chain_signal;
		if (!(chains & BIT(0)) ||
		    cur > 0)
			continue;

		if (cur > signal)
			swap(cur, signal);

		diff = signal - cur;
		if (diff == 0)
			signal += 3;
		else if (diff <= 2)
			signal += 2;
		else if (diff <= 6)
			signal += 1;
	}

	return signal;
}
EXPORT_SYMBOL(mt76_rx_signal);
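
/*
 * Worked example for the chain combining above (illustration only): with
 * chain_mask = 0x3 and chain_signal = { -60, -62 }, the first chain sets
 * signal to -60; the second chain differs by 2 dB, so 2 is added, giving
 * -58. Equal per-chain signals would add 3 dB, matching ideal power
 * combining of two identical inputs.
 */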

static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	if (status->encoding == RX_ENC_EHT) {
		status->eht.ru = mstat.eht.ru;
		status->eht.gi = mstat.eht.gi;
	} else {
		status->he_ru = mstat.he_ru;
		status->he_gi = mstat.he_gi;
		status->he_dcm = mstat.he_dcm;
	}
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;
	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
	if (status->signal <= -128)
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	if (mstat.wcid) {
		status->link_valid = mstat.wcid->link_valid;
		status->link_id = mstat.wcid->link_id;
	}

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.phy_idx);
}

static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211.
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}

static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}

static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}

static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}

static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}

void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent A-MSDU frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}

void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		if (mtk_wed_device_active(&dev->mmio.wed))
			__skb_queue_tail(&frames, skb);
		else
			mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);

static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}

void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);

static void
mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	__mt76_sta_remove(phy, vif, sta);
	mutex_unlock(&dev->mutex);
}

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	enum mt76_sta_event ev;

	phy = mt76_vif_phy(hw, vif);
	if (!phy)
		return -EINVAL;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(phy, vif, sta);

	if (!dev->drv->sta_event)
		return 0;

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		ev = MT76_STA_EVENT_ASSOC;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTHORIZED)
		ev = MT76_STA_EVENT_AUTHORIZE;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTH)
		ev = MT76_STA_EVENT_DISASSOC;
	else
		return 0;

	return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);

void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);

void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
	wcid->hw_key_idx = -1;
	wcid->phy_idx = band_idx;

	INIT_LIST_HEAD(&wcid->tx_list);
	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);

	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);

	INIT_LIST_HEAD(&wcid->poll_list);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);

void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_lock(&wcid->tx_offchannel.lock);
	skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
	spin_unlock(&wcid->tx_offchannel.lock);

	spin_unlock_bh(&phy->tx_lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);

void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);

s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
{
	int n_chains = hweight16(phy->chainmask);

	txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
	txpower -= mt76_tx_power_path_delta(n_chains);

	return txpower;
}
EXPORT_SYMBOL_GPL(mt76_get_power_bound);
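
/*
 * Unit sketch for mt76_get_power_bound() above (hedged, since the caller
 * conventions live outside this file): the txpower argument is assumed to
 * be in dBm, so the "* 2" converts it to the half-dB steps that
 * mt76_get_sar_power() clamps in, and mt76_tx_power_path_delta() then
 * subtracts the combining gain of multi-chain transmission.
 */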

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     unsigned int link_id, int *dbm)
{
	struct mt76_phy *phy = mt76_vif_phy(hw, vif);
	int n_chains, delta;

	if (!phy)
		return -EINVAL;

	n_chains = hweight16(phy->chainmask);
	delta = mt76_tx_power_path_delta(n_chains);
	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);

int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limitation in 0.25 dBm units */
		s32 power = sar->sub_specs[i].power >> 1;

		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);
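
/*
 * Conversion example for the shift above (illustration only): a
 * sub_specs[i].power of 44, i.e. 11 dBm expressed in 0.25 dBm units,
 * becomes 22 after ">> 1", i.e. the same 11 dBm in the 0.5 dBm units that
 * mt76_get_sar_power() below compares against.
 */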
1797 
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1798 int mt76_get_sar_power(struct mt76_phy *phy,
1799 		       struct ieee80211_channel *chan,
1800 		       int power)
1801 {
1802 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1803 	int freq, i;
1804 
1805 	if (!capa || !phy->frp)
1806 		return power;
1807 
1808 	if (power > 127 || power < -127)
1809 		power = 127;
1810 
1811 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1812 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1813 		if (phy->frp[i].range &&
1814 		    freq >= phy->frp[i].range->start_freq &&
1815 		    freq < phy->frp[i].range->end_freq) {
1816 			power = min_t(int, phy->frp[i].power, power);
1817 			break;
1818 		}
1819 	}
1820 
1821 	return power;
1822 }
1823 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1824 
1825 static void
__mt76_csa_finish(void * priv,u8 * mac,struct ieee80211_vif * vif)1826 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1827 {
1828 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1829 		ieee80211_csa_finish(vif, 0);
1830 }
1831 
1832 void mt76_csa_finish(struct mt76_dev *dev)
1833 {
1834 	if (!dev->csa_complete)
1835 		return;
1836 
1837 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1838 		IEEE80211_IFACE_ITER_RESUME_ALL,
1839 		__mt76_csa_finish, dev);
1840 
1841 	dev->csa_complete = 0;
1842 }
1843 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1844 
1845 static void
1846 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1847 {
1848 	struct mt76_dev *dev = priv;
1849 
1850 	if (!vif->bss_conf.csa_active)
1851 		return;
1852 
1853 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1854 }
1855 
1856 void mt76_csa_check(struct mt76_dev *dev)
1857 {
1858 	ieee80211_iterate_active_interfaces_atomic(dev->hw,
1859 		IEEE80211_IFACE_ITER_RESUME_ALL,
1860 		__mt76_csa_check, dev);
1861 }
1862 EXPORT_SYMBOL_GPL(mt76_csa_check);
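
/*
 * Hedged usage sketch for the CSA pair above: drivers typically call
 * mt76_csa_check() from their pre-TBTT/beacon work to latch countdown
 * completion into dev->csa_complete, then mt76_csa_finish() once the
 * beacon carrying the final countdown value has gone out:
 *
 *	mt76_csa_check(dev);
 *	if (dev->csa_complete)
 *		mt76_csa_finish(dev);
 */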
1863 
1864 int
1865 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1866 {
1867 	return 0;
1868 }
1869 EXPORT_SYMBOL_GPL(mt76_set_tim);
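
/*
 * Note on the stub above: mt76 devices regenerate the whole beacon,
 * TIM element included, from the template on each pre-TBTT pass, so
 * there is no incremental per-station TIM state to program here; the
 * hook only satisfies mac80211's set_tim op and reports success.
 */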
1870 
1871 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1872 {
1873 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1874 	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1875 	u8 *hdr, *pn = status->iv;
1876 
1877 	__skb_push(skb, 8);
1878 	memmove(skb->data, skb->data + 8, hdr_len);
1879 	hdr = skb->data + hdr_len;
1880 
1881 	hdr[0] = pn[5];
1882 	hdr[1] = pn[4];
1883 	hdr[2] = 0;
1884 	hdr[3] = 0x20 | (key_id << 6);
1885 	hdr[4] = pn[3];
1886 	hdr[5] = pn[2];
1887 	hdr[6] = pn[1];
1888 	hdr[7] = pn[0];
1889 
1890 	status->flag &= ~RX_FLAG_IV_STRIPPED;
1891 }
1892 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
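
/*
 * Layout note for the rebuilt CCMP header above (8 bytes following the
 * MAC header): PN0, PN1, reserved, KeyID octet (0x20 = ExtIV bit, key
 * id in bits 6-7), then PN2..PN5. status->iv holds the packet number
 * most-significant byte first, hence the reversed indexing. Clearing
 * RX_FLAG_IV_STRIPPED tells mac80211 the IV is present again so it can
 * perform replay detection on the frame.
 */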
1893 
1894 int mt76_get_rate(struct mt76_dev *dev,
1895 		  struct ieee80211_supported_band *sband,
1896 		  int idx, bool cck)
1897 {
1898 	bool is_2g = sband->band == NL80211_BAND_2GHZ;
1899 	int i, offset = 0, len = sband->n_bitrates;
1900 
1901 	if (cck) {
1902 		if (!is_2g)
1903 			return 0;
1904 
1905 		idx &= ~BIT(2); /* short preamble */
1906 	} else if (is_2g) {
1907 		offset = 4;
1908 	}
1909 
1910 	for (i = offset; i < len; i++) {
1911 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1912 			return i;
1913 	}
1914 
1915 	return 0;
1916 }
1917 EXPORT_SYMBOL_GPL(mt76_get_rate);
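
/*
 * Hedged sketch: RX paths use this to map a hardware rate index to a
 * bitrate table slot, e.g. when filling struct mt76_rx_status:
 *
 *	status->rate_idx = mt76_get_rate(dev, sband, hw_rate, is_cck);
 *
 * On 2 GHz the first four entries are CCK, so OFDM lookups start at
 * offset 4; BIT(2) in a CCK hardware index only flags short preamble
 * and is masked off before matching.
 */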
1918 
1919 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1920 		  const u8 *mac)
1921 {
1922 	struct mt76_phy *phy = hw->priv;
1923 
1924 	set_bit(MT76_SCANNING, &phy->state);
1925 }
1926 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1927 
1928 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1929 {
1930 	struct mt76_phy *phy = hw->priv;
1931 
1932 	clear_bit(MT76_SCANNING, &phy->state);
1933 }
1934 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
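
/*
 * Note on the scan hooks above, wired as .sw_scan_start and
 * .sw_scan_complete in the drivers' ieee80211_ops: the MT76_SCANNING
 * phy state bit gates other logic while a software scan runs, e.g.
 * mt76_phy_dfs_state() below reports DFS as disabled during a scan.
 */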
1935 
1936 int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
1937 		     u32 *rx_ant)
1938 {
1939 	struct mt76_phy *phy = hw->priv;
1940 	struct mt76_dev *dev = phy->dev;
1941 	int i;
1942 
1943 	mutex_lock(&dev->mutex);
1944 	*tx_ant = 0;
1945 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
1946 		if (dev->phys[i] && dev->phys[i]->hw == hw)
1947 			*tx_ant |= dev->phys[i]->chainmask;
1948 	*rx_ant = *tx_ant;
1949 	mutex_unlock(&dev->mutex);
1950 
1951 	return 0;
1952 }
1953 EXPORT_SYMBOL_GPL(mt76_get_antenna);
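
/*
 * Hedged wiring sketch: the getter ORs the chainmasks of every phy
 * registered on this ieee80211_hw (relevant for multi-radio wiphys)
 * and reports identical TX and RX masks ("foo_ops" hypothetical):
 *
 *	static const struct ieee80211_ops foo_ops = {
 *		.get_antenna = mt76_get_antenna,
 *	};
 */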
1954 
1955 struct mt76_queue *
1956 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1957 		int ring_base, void *wed, u32 flags)
1958 {
1959 	struct mt76_queue *hwq;
1960 	int err;
1961 
1962 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1963 	if (!hwq)
1964 		return ERR_PTR(-ENOMEM);
1965 
1966 	hwq->flags = flags;
1967 	hwq->wed = wed;
1968 
1969 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1970 	if (err < 0)
1971 		return ERR_PTR(err);
1972 
1973 	return hwq;
1974 }
1975 EXPORT_SYMBOL_GPL(mt76_init_queue);
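
/*
 * Hedged usage sketch: callers must check the ERR_PTR-encoded return,
 * e.g. when setting up a TX ring (ring size/base constants are
 * hypothetical here):
 *
 *	struct mt76_queue *q;
 *
 *	q = mt76_init_queue(dev, MT_TXQ_BE, 0, FOO_TX_RING_SIZE,
 *			    FOO_TX_RING_BASE, NULL, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 * The queue struct is devm-allocated, so no explicit free is needed
 * on the error path.
 */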
1976 
1977 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
1978 			 struct mt76_sta_stats *stats, bool eht)
1979 {
1980 	int i, ei = wi->initial_stat_idx;
1981 	u64 *data = wi->data;
1982 
1983 	wi->sta_count++;
1984 
1985 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
1986 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
1987 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
1988 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
1989 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
1990 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
1991 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
1992 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
1993 	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
1994 	if (eht) {
1995 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
1996 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
1997 		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
1998 	}
1999 
2000 	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
2001 		data[ei++] += stats->tx_bw[i];
2002 
2003 	for (i = 0; i < (eht ? 14 : 12); i++)
2004 		data[ei++] += stats->tx_mcs[i];
2005 
2006 	for (i = 0; i < 4; i++)
2007 		data[ei++] += stats->tx_nss[i];
2008 
2009 	wi->worker_stat_count = ei - wi->initial_stat_idx;
2010 }
2011 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
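
/*
 * Bookkeeping note for the accumulator above: wi->initial_stat_idx
 * must point at the first per-rate counter in the driver's ethtool
 * string table before the station iteration starts; worker_stat_count
 * then lets the caller advance its data index past the block this
 * helper filled. A hedged sketch ("foo_sta_stats" hypothetical):
 *
 *	wi.initial_stat_idx = ei;
 *	ieee80211_iterate_stations_atomic(hw, foo_sta_stats, &wi);
 *	ei += wi.worker_stat_count;
 */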
2012 
2013 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
2014 {
2015 #ifdef CONFIG_PAGE_POOL_STATS
2016 	struct page_pool_stats stats = {};
2017 	int i;
2018 
2019 	mt76_for_each_q_rx(dev, i)
2020 		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
2021 
2022 	page_pool_ethtool_stats_get(data, &stats);
2023 	*index += page_pool_ethtool_stats_get_count();
2024 #endif
2025 }
2026 EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
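
/*
 * Note: without CONFIG_PAGE_POOL_STATS the body above compiles out
 * entirely and *index is left untouched, which matches the #ifdef
 * guards around the corresponding string table in the drivers'
 * ethtool hooks; the index only advances when the stats really exist.
 */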
2027 
2028 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
2029 {
2030 	struct ieee80211_hw *hw = phy->hw;
2031 	struct mt76_dev *dev = phy->dev;
2032 
2033 	if (dev->region == NL80211_DFS_UNSET ||
2034 	    test_bit(MT76_SCANNING, &phy->state))
2035 		return MT_DFS_STATE_DISABLED;
2036 
2037 	if (!phy->radar_enabled) {
2038 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
2039 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
2040 			return MT_DFS_STATE_ACTIVE;
2041 
2042 		return MT_DFS_STATE_DISABLED;
2043 	}
2044 
2045 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
2046 		return MT_DFS_STATE_CAC;
2047 
2048 	return MT_DFS_STATE_ACTIVE;
2049 }
2050 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
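
/*
 * State summary for the helper above: DISABLED while the regulatory
 * region is unset or a scan is running, or while radar detection is
 * off and we are not monitoring a radar channel; CAC while beaconing
 * on the radar channel is still forbidden, i.e. the CAC period has not
 * completed; ACTIVE otherwise. Drivers typically diff this against the
 * firmware's last-programmed state to decide when to start or stop the
 * radar detector.
 */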
2051 
2052 void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
2053 {
2054 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
2055 	struct mt76_vif_data *mvif = mlink->mvif;
2056 
2057 	rcu_assign_pointer(mvif->link[0], NULL);
2058 	mt76_abort_scan(dev);
2059 	if (mvif->roc_phy)
2060 		mt76_abort_roc(mvif->roc_phy);
2061 }
2062 EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
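
/*
 * Hedged usage sketch: a driver's remove_interface handler would call
 * this first so the default link pointer is cleared and any scan or
 * remain-on-channel bound to this vif is aborted before per-driver
 * teardown ("foo_remove_interface" is hypothetical):
 *
 *	static void foo_remove_interface(struct ieee80211_hw *hw,
 *					 struct ieee80211_vif *vif)
 *	{
 *		struct mt76_phy *phy = hw->priv;
 *
 *		mt76_vif_cleanup(phy->dev, vif);
 *		// driver-specific wcid/link teardown follows
 *	}
 */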
2063