xref: /linux/drivers/net/wireless/mediatek/mt76/mac80211.c (revision c1ead4b4dfe0f643cfc66571ca7d2fa332eddd35)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
/* Channel table entry initializers: _idx is the channel number reported
 * to mac80211 (hw_value), _freq the center frequency in MHz. The 30 dBm
 * max_power here is only the static table value.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29 
/* 2.4 GHz band: channels 1-14 (2412-2484 MHz) */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46 
/* 5 GHz band: channels 36-177, grouped per UNII sub-band */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80 
/* 6 GHz band: channels 1-233 (5955-7115 MHz), grouped per UNII sub-band */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146 
/* LED throughput blink table: higher throughput (in kbps units here,
 * value * 1024) maps to a shorter blink interval (ms). Consumed by
 * ieee80211_create_tpt_led_trigger() in mt76_led_init().
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159 
/* Shared legacy rate table: 4 CCK entries followed by 8 OFDM entries.
 * Callers that register 5/6 GHz bands pass "rates + 4, n_rates - 4" to
 * skip the CCK rates, so the CCK-first layout must be preserved.
 * Bitrates are in units of 100 kbps (e.g. 10 == 1 Mbps).
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175 
/* Frequency ranges (MHz) for per-range SAR power limits exposed to
 * userspace via wiphy->sar_capa; one mt76_freq_range_power entry is
 * allocated per range in mt76_phy_init().
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189 
/* SAR capability advertised for all mt76 wiphys (power-limit type) */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195 
mt76_led_init(struct mt76_phy * phy)196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 	struct device_node *np = dev->dev->of_node;
201 
202 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 		return 0;
204 
205 	np = of_get_child_by_name(np, "led");
206 	if (np) {
207 		if (!of_device_is_available(np)) {
208 			of_node_put(np);
209 			dev_info(dev->dev,
210 				"led registration was explicitly disabled by dts\n");
211 			return 0;
212 		}
213 
214 		if (phy == &dev->phy) {
215 			int led_pin;
216 
217 			if (!of_property_read_u32(np, "led-sources", &led_pin))
218 				phy->leds.pin = led_pin;
219 
220 			phy->leds.al =
221 				of_property_read_bool(np, "led-active-low");
222 		}
223 
224 		of_node_put(np);
225 	}
226 
227 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 		 wiphy_name(hw->wiphy));
229 
230 	phy->leds.cdev.name = phy->leds.name;
231 	phy->leds.cdev.default_trigger =
232 		ieee80211_create_tpt_led_trigger(hw,
233 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 					mt76_tpt_blink,
235 					ARRAY_SIZE(mt76_tpt_blink));
236 
237 	dev_info(dev->dev,
238 		"registering led '%s'\n", phy->leds.name);
239 
240 	return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242 
mt76_led_cleanup(struct mt76_phy * phy)243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 		return;
247 
248 	led_classdev_unregister(&phy->leds.cdev);
249 }
250 
/* Program HT (and optionally VHT) spatial-stream capabilities of @sband
 * from the phy's antenna mask: TX STBC is advertised only with more than
 * one stream, and the HT/VHT RX MCS maps cover exactly nstream streams.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	/* one spatial stream per antenna bit */
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* enable MCS 0-7 per supported stream, clear the rest */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* 2 bits per NSS: MCS 0-9 for supported streams, else unsupported */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292 
/* Refresh stream (NSS) capabilities on every band the phy supports.
 * The 2.4 GHz band is HT-only; @vht only applies to 5/6 GHz.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);

	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);

	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303 
/* Populate one ieee80211_supported_band: duplicate the const channel
 * template into writable devm memory, allocate per-channel survey state,
 * and set up HT/VHT capabilities as requested.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (devm-managed, so
 * nothing needs explicit cleanup on error).
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* writable copy: mac80211 mutates channel flags at runtime */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	/* per-channel state (survey/airtime counters), zero-initialized */
	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* stream-count-dependent bits (STBC, MCS maps) */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360 
361 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369 			       n_rates, true, false);
370 }
371 
372 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates, bool vht)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380 			       n_rates, true, vht);
381 }
382 
383 static int
mt76_init_sband_6g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 		   int n_rates)
386 {
387 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388 
389 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391 			       n_rates, false, false);
392 }
393 
/* After regulatory/DT limits have been applied, decide whether @band is
 * usable: if at least one channel is not disabled, pick channel 0 as the
 * default chandef and claim the band for this phy; otherwise drop the
 * band from the wiphy entirely.
 */
static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	/* NOTE(review): sband is the address of an embedded member and can
	 * never be NULL; this check looks dead but is kept as-is. */
	if (!sband)
		return;

	/* any non-disabled channel makes the band usable */
	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		/* default to the band's first channel, HT20 */
		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
					NL80211_CHAN_HT20);
		phy->chan_state = &msband->chan[0];
		phy->dev->band_phys[band] = phy;
		return;
	}

	/* no usable channels: hide the band from mac80211 */
	sband->n_channels = 0;
	if (phy->hw->wiphy->bands[band] == sband)
		phy->hw->wiphy->bands[band] = NULL;
}
425 
/* Common phy/hw initialization shared by mt76_register_phy() and
 * mt76_register_device(). Per-phy state is always initialized; the
 * wiphy/hw-wide setup below the early return runs only for the phy that
 * owns the ieee80211_hw (secondary radio phys share an already
 * initialized hw).
 *
 * Returns 0 on success, -ENOMEM if the SAR table allocation fails.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	/* hw-wide setup only for the phy embedded in hw->priv */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* default the advertised antennas to the phy mask unless the
	 * driver already set them */
	if (!wiphy->available_antennas_tx)
		wiphy->available_antennas_tx = phy->antenna_mask;
	if (!wiphy->available_antennas_rx)
		wiphy->available_antennas_rx = phy->antenna_mask;

	/* per-range SAR power limits, one entry per advertised range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* software A-MSDU aggregation only when the hardware does not
	 * offload it and fragment lists are usable */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
491 
492 struct mt76_phy *
mt76_alloc_radio_phy(struct mt76_dev * dev,unsigned int size,u8 band_idx)493 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
494 		     u8 band_idx)
495 {
496 	struct ieee80211_hw *hw = dev->phy.hw;
497 	unsigned int phy_size;
498 	struct mt76_phy *phy;
499 
500 	phy_size = ALIGN(sizeof(*phy), 8);
501 	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
502 	if (!phy)
503 		return NULL;
504 
505 	phy->dev = dev;
506 	phy->hw = hw;
507 	phy->priv = (void *)phy + phy_size;
508 	phy->band_idx = band_idx;
509 
510 	return phy;
511 }
512 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
513 
/* Allocate a new ieee80211_hw with an embedded mt76_phy (plus @size
 * bytes of driver private data behind it) and set up its default
 * interface modes.
 *
 * Returns the new phy (hw->priv), or NULL on allocation failure. The
 * caller owns the hw and releases it via ieee80211_free_hw().
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	/* hw->priv layout: mt76_phy, then 8-byte aligned driver priv */
	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	/* same default modes as mt76_alloc_device() */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
547 
/* Register an extra phy created by mt76_alloc_phy()/mt76_alloc_radio_phy():
 * initialize it, populate its supported bands from @rates (CCK-first
 * table; 5/6 GHz skip the 4 CCK entries), set up LEDs, and register the
 * hw with mac80211 when this phy owns it.
 *
 * Returns 0 on success or a negative errno from any init step.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the 4 CCK entries at the start of the rate table */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* apply DT frequency limits, then drop fully-disabled bands */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* only the phy embedded in hw->priv registers the hw itself;
	 * radio phys share an hw registered elsewhere */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
598 
/* Tear down a phy registered with mt76_register_phy(). Safe to call on
 * a phy that never completed registration (no-op in that case).
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* flush pending tx status entries before the hw goes away */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
613 
/* Create a page pool for @q. Only rx queues and the WED tx-free queue
 * get one; for all other queues this is a no-op returning 0.
 *
 * Returns 0 on success (or no-op), a negative errno from
 * page_pool_create() on failure (q->page_pool left NULL).
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* rx queue index within dev->q_rx, -1 for non-rx queues */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	/* data queues get a larger pool than control/mcu queues */
	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
662 
/* Allocate the primary ieee80211_hw/mt76_dev pair and initialize all
 * device-wide state (locks, queues, token IDRs, workers, interface
 * modes). The embedded dev->phy becomes band MT_BAND0.
 *
 * Returns the new mt76_dev, or NULL on allocation failure. The caller
 * releases it with mt76_free_device().
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* default DMA device is the platform device; buses may override */
	dev->dma_dev = pdev;

	/* the embedded phy is the primary band */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	/* same default modes as mt76_alloc_phy() */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	/* ordered workqueue for device-wide deferred work */
	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
740 
/* Register the primary phy of @dev with mac80211: initialize bands from
 * @rates (CCK-first table; 5/6 GHz skip the 4 CCK entries), apply DT
 * frequency limits, set up LEDs, register the hw and start the tx
 * worker.
 *
 * Returns 0 on success or a negative errno from any init step.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	/* catch-all wcid used for frames without a station entry */
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the 4 CCK entries at the start of the rate table */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* apply DT frequency limits, then drop fully-disabled bands */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	/* tx worker runs as low-priority FIFO to keep latency bounded */
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
794 
/* Tear down the primary phy registered by mt76_register_device().
 * No-op if registration never completed.
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* flush pending tx status and global-wcid state before the hw
	 * is unregistered */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
809 
mt76_free_device(struct mt76_dev * dev)810 void mt76_free_device(struct mt76_dev *dev)
811 {
812 	mt76_worker_teardown(&dev->tx_worker);
813 	if (dev->wq) {
814 		destroy_workqueue(dev->wq);
815 		dev->wq = NULL;
816 	}
817 	ieee80211_free_hw(dev->hw);
818 }
819 EXPORT_SYMBOL_GPL(mt76_free_device);
820 
mt76_reset_phy(struct mt76_phy * phy)821 static void mt76_reset_phy(struct mt76_phy *phy)
822 {
823 	if (!phy)
824 		return;
825 
826 	INIT_LIST_HEAD(&phy->tx_list);
827 	phy->num_sta = 0;
828 	phy->chanctx = NULL;
829 	mt76_roc_complete(phy);
830 }
831 
/* Reset device-wide software state after a firmware/hardware reset:
 * drop every wcid entry, abort scans and clear per-phy state.
 */
void mt76_reset_device(struct mt76_dev *dev)
{
	int i;

	/* walk the RCU-protected wcid table and detach all entries */
	rcu_read_lock();
	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[i]);
		if (!wcid)
			continue;

		wcid->sta = 0;
		mt76_wcid_cleanup(dev, wcid);
		rcu_assign_pointer(dev->wcid[i], NULL);
	}
	rcu_read_unlock();

	mt76_abort_scan(dev);

	/* reinitialize lists whose members were just invalidated */
	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	dev->vif_mask = 0;
	memset(dev->wcid_mask, 0, sizeof(dev->wcid_mask));

	mt76_reset_phy(&dev->phy);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
		mt76_reset_phy(dev->phys[i]);
}
EXPORT_SYMBOL_GPL(mt76_reset_device);
862 
/* Resolve the mt76_phy a vif is currently operating on.
 *
 * Single-radio wiphys always map to the primary phy (hw->priv). On
 * multi-radio wiphys the phy comes from the vif's channel context, so
 * NULL is returned while the vif has no context assigned.
 */
struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_chanctx *ctx;

	if (!hw->wiphy->n_radio)
		return hw->priv;

	if (!mlink->ctx)
		return NULL;

	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
	return ctx->phy;
}
EXPORT_SYMBOL_GPL(mt76_vif_phy);
879 
/* Hand the accumulated A-MSDU (head skb plus frag_list of subframes) on
 * queue @q to the rx skb queue, after sanity-checking the first
 * subframe. Resets the per-queue accumulation state.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* decrypted frames with the IV still present carry
			 * 8 extra bytes before the payload */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP header where the DA should be: drop the frame */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
915 
/* Accumulate rx frames belonging to one A-MSDU into a frag_list on the
 * per-queue head skb, releasing the previous A-MSDU whenever a frame
 * that cannot belong to it arrives (non-A-MSDU, new first subframe, or
 * different sequence number). Non-A-MSDU frames pass straight through.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* flush the pending A-MSDU if this frame does not continue it */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new accumulation with this skb as head */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* append to the head skb's frag_list */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* single frames and last subframes complete the burst */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
938 
/* Driver rx entry point: route a received frame to the phy selected by
 * status->phy_idx, update testmode rx counters, and feed it into the
 * A-MSDU burst accumulator. Frames for a stopped phy are dropped.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
960 
mt76_has_tx_pending(struct mt76_phy * phy)961 bool mt76_has_tx_pending(struct mt76_phy *phy)
962 {
963 	struct mt76_queue *q;
964 	int i;
965 
966 	for (i = 0; i < __MT_TXQ_MAX; i++) {
967 		q = phy->q_tx[i];
968 		if (q && q->queued)
969 			return true;
970 	}
971 
972 	return false;
973 }
974 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
975 
976 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)977 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
978 {
979 	struct mt76_sband *msband;
980 	int idx;
981 
982 	if (c->band == NL80211_BAND_2GHZ)
983 		msband = &phy->sband_2g;
984 	else if (c->band == NL80211_BAND_6GHZ)
985 		msband = &phy->sband_6g;
986 	else
987 		msband = &phy->sband_5g;
988 
989 	idx = c - &msband->sband.channels[0];
990 	return &msband->chan[idx];
991 }
992 
/* Credit the elapsed time since the last survey snapshot to the current
 * channel's active-time counter and advance the snapshot timestamp.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;
	ktime_t delta = ktime_sub(time, phy->survey_time);

	state->cc_active += ktime_to_us(delta);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
1002 
/* Refresh survey statistics for the current channel: let the driver
 * update its hardware counters, account active time, and fold in the
 * software BSS rx airtime counter when the driver uses software
 * accounting.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cur_cc_bss_rx is shared with the rx path; cc_lock
		 * protects the read-and-reset */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
1024 
/* Switch @phy to @chandef with tx quiesced. Caller must hold dev->mutex
 * (see mt76_set_channel()).
 *
 * The MT76_RESET bit plus the disabled tx worker keep new transmissions
 * off the air while the channel changes; pending tx is drained with a
 * 200 ms timeout. Returns the driver's set_channel() result.
 */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	set_bit(MT76_RESET, &phy->state);

	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	/* close out survey accounting on the old channel */
	mt76_update_survey(phy);

	/* moving frequency or width invalidates any DFS CAC result */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chandef = *chandef;

	/* reset channel state when leaving the operating channel */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
1060 
/* Locked wrapper around __mt76_set_channel(): stop the periodic MAC
 * work first, then perform the switch under the device mutex.
 */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int err;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	err = __mt76_set_channel(phy, chandef, offchannel);
	mutex_unlock(&dev->mutex);

	return err;
}
1075 
mt76_update_channel(struct mt76_phy * phy)1076 int mt76_update_channel(struct mt76_phy *phy)
1077 {
1078 	struct ieee80211_hw *hw = phy->hw;
1079 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1080 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1081 
1082 	phy->radar_enabled = hw->conf.radar_enabled;
1083 
1084 	return mt76_set_channel(phy, chandef, offchannel);
1085 }
1086 EXPORT_SYMBOL_GPL(mt76_update_channel);
1087 
1088 static struct mt76_sband *
mt76_get_survey_sband(struct mt76_phy * phy,int * idx)1089 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1090 {
1091 	if (*idx < phy->sband_2g.sband.n_channels)
1092 		return &phy->sband_2g;
1093 
1094 	*idx -= phy->sband_2g.sband.n_channels;
1095 	if (*idx < phy->sband_5g.sband.n_channels)
1096 		return &phy->sband_5g;
1097 
1098 	*idx -= phy->sband_5g.sband.n_channels;
1099 	if (*idx < phy->sband_6g.sband.n_channels)
1100 		return &phy->sband_6g;
1101 
1102 	*idx -= phy->sband_6g.sband.n_channels;
1103 	return NULL;
1104 }
1105 
/* mac80211 .get_survey callback: report per-channel statistics for the
 * idx-th channel across all phys registered on this ieee80211_hw.
 * Returns -ENOENT once idx runs past the last channel.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband = NULL;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int phy_idx = 0;
	int ret = 0;

	mutex_lock(&dev->mutex);

	/* mt76_get_survey_sband() rebases idx past each band it skips, so
	 * the walk naturally continues into the next phy's bands when this
	 * hw spans multiple radios.
	 */
	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
		sband = NULL;
		phy = dev->phys[phy_idx];
		if (!phy || phy->hw != hw)
			continue;

		sband = mt76_get_survey_sband(phy, &idx);

		/* Refresh hardware counters once, at each phy's first channel */
		if (idx == 0 && phy->dev->drv->update_survey)
			mt76_update_survey(phy);

		/* Without multi-radio support, stop at the first matching phy */
		if (sband || !hw->wiphy->n_radio)
			break;
	}

	if (!sband) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		/* BSS rx time is only tracked via software accounting */
		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* Counters are scaled down by 1000 (usec -> msec, presumably) */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are written from the rx path under cc_lock */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1172 
/* Seed the software PN replay counters of @wcid from @key's current
 * rx sequence numbers. Software PN checking is only enabled for CCMP;
 * any other (or no) key disables it.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frames: one replay counter per TID */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frames use the extra slot at index
	 * IEEE80211_NUM_TIDS (i after the loop); tid -1 fetches the
	 * management frame sequence from mac80211
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1201 
/* Combine per-chain rssi readings into one overall signal estimate.
 * Starting from the strongest chain, each additional chain contributes
 * a combining bonus of 3/2/1 dB depending on how close it is to the
 * running estimate. Returns -128 when no chain reported a valid value.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int signal = -128;
	u8 mask;

	for (mask = chain_mask; mask; mask >>= 1, chain_signal++) {
		int val = *chain_signal;
		int hi, lo;

		/* Skip absent chains and invalid (positive) readings */
		if (!(mask & BIT(0)) || val > 0)
			continue;

		hi = signal > val ? signal : val;
		lo = signal > val ? val : signal;

		signal = hi;
		if (hi - lo == 0)
			signal += 3;
		else if (hi - lo <= 2)
			signal += 2;
		else if (hi - lo <= 6)
			signal += 1;
	}

	return signal;
}
EXPORT_SYMBOL(mt76_rx_signal);
1230 
1231 static void
mt76_rx_convert(struct mt76_dev * dev,struct sk_buff * skb,struct ieee80211_hw ** hw,struct ieee80211_sta ** sta)1232 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1233 		struct ieee80211_hw **hw,
1234 		struct ieee80211_sta **sta)
1235 {
1236 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1237 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1238 	struct mt76_rx_status mstat;
1239 
1240 	mstat = *((struct mt76_rx_status *)skb->cb);
1241 	memset(status, 0, sizeof(*status));
1242 
1243 	skb->priority = mstat.qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1244 
1245 	status->flag = mstat.flag;
1246 	status->freq = mstat.freq;
1247 	status->enc_flags = mstat.enc_flags;
1248 	status->encoding = mstat.encoding;
1249 	status->bw = mstat.bw;
1250 	if (status->encoding == RX_ENC_EHT) {
1251 		status->eht.ru = mstat.eht.ru;
1252 		status->eht.gi = mstat.eht.gi;
1253 	} else {
1254 		status->he_ru = mstat.he_ru;
1255 		status->he_gi = mstat.he_gi;
1256 		status->he_dcm = mstat.he_dcm;
1257 	}
1258 	status->rate_idx = mstat.rate_idx;
1259 	status->nss = mstat.nss;
1260 	status->band = mstat.band;
1261 	status->signal = mstat.signal;
1262 	status->chains = mstat.chains;
1263 	status->ampdu_reference = mstat.ampdu_ref;
1264 	status->device_timestamp = mstat.timestamp;
1265 	status->mactime = mstat.timestamp;
1266 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1267 	if (status->signal <= -128)
1268 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1269 
1270 	if (ieee80211_is_beacon(hdr->frame_control) ||
1271 	    ieee80211_is_probe_resp(hdr->frame_control))
1272 		status->boottime_ns = ktime_get_boottime_ns();
1273 
1274 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1275 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1276 		     sizeof(mstat.chain_signal));
1277 	memcpy(status->chain_signal, mstat.chain_signal,
1278 	       sizeof(mstat.chain_signal));
1279 
1280 	if (mstat.wcid) {
1281 		status->link_valid = mstat.wcid->link_valid;
1282 		status->link_id = mstat.wcid->link_id;
1283 	}
1284 
1285 	*sta = wcid_to_sta(mstat.wcid);
1286 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1287 }
1288 
/* Software CCMP PN replay check. Frames whose packet number does not
 * strictly increase relative to the stored counter are flagged
 * RX_FLAG_ONLY_MONITOR so mac80211 drops them from the data path.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* One replay counter per TID; may be redirected below */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* Lexicographic memcmp works as a big-endian PN comparison here;
	 * <= 0 means the PN did not advance, i.e. a replay.
	 */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1346 
/* Compute the rx airtime of a frame from its rate info and length,
 * charge it to the channel accounting and, when the frame belongs to
 * a known station, to that station's mac80211 airtime stats.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	/* Minimal rx-status needed by the mac80211 airtime calculator */
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	/* Accumulated into chan_state later by mt76_update_survey() */
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
1375 
/* Report the airtime of the A-MPDU accumulated so far and reset the
 * accumulator. The wcid is re-resolved from its index under RCU in
 * case the station went away while the A-MPDU was being collected.
 */
static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;	/* 0xff sentinel: no station */
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}
1397 
/* Per-frame entry point for software rx airtime accounting. Frames in
 * the same A-MPDU (same ampdu_ref) are accumulated by length and
 * reported in one shot, since they share a single PHY transmission.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* Only account frames addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* A non-aggregated frame or a new A-MPDU closes the previous one */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* First subframe: remember rate info and station */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1437 
/* Per-frame station bookkeeping on the rx path: airtime accounting,
 * RSSI averaging, and tracking of the peer's power-save state with the
 * corresponding notifications to mac80211 and the driver.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll frames may arrive without a hardware wcid match;
	 * resolve the station by transmitter address instead.
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* ewma stores the magnitude; signal values are <= 0 dBm */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	/* PS tracking below is only for stations the driver opted in */
	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* Only complete mgmt/data frames carry a meaningful PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* No PS state change */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* Update the flag before (enter) / after (exit) notifying the
	 * driver, so the tx path sees a consistent state.
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1505 
/* Hand a batch of fully processed rx frames to mac80211. Frames are
 * first collected into a list under rx_lock, then delivered either via
 * NAPI GRO (@napi set) or the plain netif receive path.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		/* A-MSDU subframes were chained on frag_list by the driver;
		 * detach them and deliver each one individually.
		 */
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1545 
/* Drain queued rx frames for queue @q: run station/airtime bookkeeping,
 * apply rx aggregation reordering where needed, then deliver the batch.
 */
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		/* When WED is active, software reordering is skipped --
		 * presumably handled by the offload hardware; confirm
		 * against the WED datapath.
		 */
		if (mtk_wed_device_active(&dev->mmio.wed))
			__skb_queue_tail(&frames, skb);
		else
			mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1565 
/* Add a station to @phy: let the driver allocate its resources, wire
 * the per-TID tx queues to the assigned wcid index and publish the
 * wcid for RCU lookup. Called under dev->mutex (taken here).
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	/* Driver assigns wcid->idx and hardware state */
	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	/* Publish only after the driver fully set up the station */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1601 
/* Tear down a station: stop rx aggregation sessions, release driver
 * resources, flush pending tx, and free the wcid index. Caller must
 * hold dev->mutex (see mt76_sta_remove()).
 */
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	/* Drop queued tx and pending status entries for this wcid */
	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1621 
1622 static void
mt76_sta_remove(struct mt76_phy * phy,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1623 mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
1624 		struct ieee80211_sta *sta)
1625 {
1626 	struct mt76_dev *dev = phy->dev;
1627 
1628 	mutex_lock(&dev->mutex);
1629 	__mt76_sta_remove(phy, vif, sta);
1630 	mutex_unlock(&dev->mutex);
1631 }
1632 
/* mac80211 .sta_state callback: dispatch station add/remove and map the
 * remaining state transitions onto the driver's sta_event hook.
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	enum mt76_sta_event ev;

	/* Resolve the phy the vif actually lives on */
	phy = mt76_vif_phy(hw, vif);
	if (!phy)
		return -EINVAL;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	/* Note: deliberately no return here -- falls through to the
	 * sta_event mapping below, where no transition matches and 0 is
	 * returned.
	 */
	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(phy, vif, sta);

	if (!dev->drv->sta_event)
		return 0;

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		ev = MT76_STA_EVENT_ASSOC;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTHORIZED)
		ev = MT76_STA_EVENT_AUTHORIZE;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTH)
		ev = MT76_STA_EVENT_DISASSOC;
	else
		return 0;

	return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
1672 
/* mac80211 .sta_pre_rcu_remove callback: unpublish the wcid pointer
 * before mac80211 drops the station, so no new RCU lookups can find it.
 * status_lock additionally fences the tx-status path.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1687 
/* Initialize the generic parts of a wcid: queues, list heads and the
 * packet-id allocator. Hardware key slot starts out unassigned (-1).
 */
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
	wcid->hw_key_idx = -1;
	wcid->phy_idx = band_idx;

	idr_init(&wcid->pktid);

	INIT_LIST_HEAD(&wcid->tx_list);
	INIT_LIST_HEAD(&wcid->list);
	INIT_LIST_HEAD(&wcid->poll_list);

	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1703 
/* Flush all per-wcid tx state: pending status entries, queued frames
 * (on-channel and off-channel) and the packet-id allocator. Collected
 * skbs are freed outside the locks.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* Pull all outstanding tx-status skbs (-1 = any pktid) into list */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* Queue locks nest inside phy->tx_lock */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_lock(&wcid->tx_offchannel.lock);
	skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
	spin_unlock(&wcid->tx_offchannel.lock);

	spin_unlock_bh(&phy->tx_lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1738 
/* Queue @wcid for periodic station polling, unless an MCU reset is in
 * progress or the wcid is not backed by a station. Duplicate adds are
 * avoided via the list_empty() check under sta_poll_lock.
 */
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1750 
mt76_get_power_bound(struct mt76_phy * phy,s8 txpower)1751 s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
1752 {
1753 	int n_chains = hweight16(phy->chainmask);
1754 
1755 	txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
1756 	txpower -= mt76_tx_power_path_delta(n_chains);
1757 
1758 	return txpower;
1759 }
1760 EXPORT_SYMBOL_GPL(mt76_get_power_bound);
1761 
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,unsigned int link_id,int * dbm)1762 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1763 		     unsigned int link_id, int *dbm)
1764 {
1765 	struct mt76_phy *phy = mt76_vif_phy(hw, vif);
1766 	int n_chains, delta;
1767 
1768 	if (!phy)
1769 		return -EINVAL;
1770 
1771 	n_chains = hweight16(phy->chainmask);
1772 	delta = mt76_tx_power_path_delta(n_chains);
1773 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1774 
1775 	return 0;
1776 }
1777 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1778 
mt76_init_sar_power(struct ieee80211_hw * hw,const struct cfg80211_sar_specs * sar)1779 int mt76_init_sar_power(struct ieee80211_hw *hw,
1780 			const struct cfg80211_sar_specs *sar)
1781 {
1782 	struct mt76_phy *phy = hw->priv;
1783 	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1784 	int i;
1785 
1786 	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1787 		return -EINVAL;
1788 
1789 	for (i = 0; i < sar->num_sub_specs; i++) {
1790 		u32 index = sar->sub_specs[i].freq_range_index;
1791 		/* SAR specifies power limitaton in 0.25dbm */
1792 		s32 power = sar->sub_specs[i].power >> 1;
1793 
1794 		if (power > 127 || power < -127)
1795 			power = 127;
1796 
1797 		phy->frp[index].range = &capa->freq_ranges[index];
1798 		phy->frp[index].power = power;
1799 	}
1800 
1801 	return 0;
1802 }
1803 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1804 
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1805 int mt76_get_sar_power(struct mt76_phy *phy,
1806 		       struct ieee80211_channel *chan,
1807 		       int power)
1808 {
1809 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1810 	int freq, i;
1811 
1812 	if (!capa || !phy->frp)
1813 		return power;
1814 
1815 	if (power > 127 || power < -127)
1816 		power = 127;
1817 
1818 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1819 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1820 		if (phy->frp[i].range &&
1821 		    freq >= phy->frp[i].range->start_freq &&
1822 		    freq < phy->frp[i].range->end_freq) {
1823 			power = min_t(int, phy->frp[i].power, power);
1824 			break;
1825 		}
1826 	}
1827 
1828 	return power;
1829 }
1830 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1831 
1832 static void
__mt76_csa_finish(void * priv,u8 * mac,struct ieee80211_vif * vif)1833 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1834 {
1835 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1836 		ieee80211_csa_finish(vif, 0);
1837 }
1838 
/* Finalize a channel switch announcement: once mt76_csa_check() has
 * marked the countdown complete, notify mac80211 on every active
 * interface and reset the flag.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1851 
1852 static void
__mt76_csa_check(void * priv,u8 * mac,struct ieee80211_vif * vif)1853 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1854 {
1855 	struct mt76_dev *dev = priv;
1856 
1857 	if (!vif->bss_conf.csa_active)
1858 		return;
1859 
1860 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1861 }
1862 
/* Poll all active interfaces for a completed CSA beacon countdown;
 * the result is latched in dev->csa_complete for mt76_csa_finish().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1870 
/* mac80211 .set_tim callback: intentionally a no-op -- presumably the
 * TIM element is maintained when the beacon is (re)built elsewhere;
 * the callback only needs to exist so mac80211 tracks TIM changes.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1877 
/* Re-insert the 8-byte CCMP header that the hardware stripped on rx:
 * make room in front of the frame, move the 802.11 header back to the
 * start, and rebuild the CCMP header from the PN saved in status->iv.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];			/* PN0 */
	hdr[1] = pn[4];			/* PN1 */
	hdr[2] = 0;			/* reserved */
	hdr[3] = 0x20 | (key_id << 6);	/* ExtIV + key index */
	hdr[4] = pn[3];			/* PN2 */
	hdr[5] = pn[2];			/* PN3 */
	hdr[6] = pn[1];			/* PN4 */
	hdr[7] = pn[0];			/* PN5 */

	/* The IV is present again; clear the stripped flag for mac80211 */
	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1900 
/* Translate a hardware rate index into the bitrate-table index of
 * @sband. Returns 0 (the first rate) when no entry matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int start = 0, i;

	if (cck) {
		/* CCK only exists on 2 GHz */
		if (sband->band != NL80211_BAND_2GHZ)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband->band == NL80211_BAND_2GHZ) {
		/* OFDM entries follow the four CCK rates on 2 GHz */
		start = 4;
	}

	for (i = start; i < sband->n_bitrates; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1925 
mt76_sw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif,const u8 * mac)1926 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1927 		  const u8 *mac)
1928 {
1929 	struct mt76_phy *phy = hw->priv;
1930 
1931 	set_bit(MT76_SCANNING, &phy->state);
1932 }
1933 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1934 
mt76_sw_scan_complete(struct ieee80211_hw * hw,struct ieee80211_vif * vif)1935 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1936 {
1937 	struct mt76_phy *phy = hw->priv;
1938 
1939 	clear_bit(MT76_SCANNING, &phy->state);
1940 }
1941 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1942 
/* mac80211 .get_antenna callback: report the union of the chainmasks
 * of all phys attached to this ieee80211_hw; rx mirrors tx.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
		     u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	u32 mask = 0;
	int i;

	mutex_lock(&dev->mutex);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *cur = dev->phys[i];

		if (cur && cur->hw == hw)
			mask |= cur->chainmask;
	}
	mutex_unlock(&dev->mutex);

	*tx_ant = mask;
	*rx_ant = mask;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1961 
1962 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base,void * wed,u32 flags)1963 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1964 		int ring_base, void *wed, u32 flags)
1965 {
1966 	struct mt76_queue *hwq;
1967 	int err;
1968 
1969 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1970 	if (!hwq)
1971 		return ERR_PTR(-ENOMEM);
1972 
1973 	hwq->flags = flags;
1974 	hwq->wed = wed;
1975 
1976 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1977 	if (err < 0)
1978 		return ERR_PTR(err);
1979 
1980 	return hwq;
1981 }
1982 EXPORT_SYMBOL_GPL(mt76_init_queue);
1983 
/* Accumulate one station's tx statistics into the ethtool data array.
 * The write order here must stay in sync with the corresponding stat
 * string table defined by the driver -- do not reorder.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* Without EHT the last bandwidth bucket is not reported */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 14 MCS entries with EHT, 12 otherwise */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	/* Let the caller know how many slots were consumed */
	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
2019 
/* Append aggregated page-pool statistics of all rx queues to the
 * ethtool data array. Compiles to a no-op without
 * CONFIG_PAGE_POOL_STATS; *index is only advanced when stats exist.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
2034 
mt76_phy_dfs_state(struct mt76_phy * phy)2035 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
2036 {
2037 	struct ieee80211_hw *hw = phy->hw;
2038 	struct mt76_dev *dev = phy->dev;
2039 
2040 	if (dev->region == NL80211_DFS_UNSET ||
2041 	    test_bit(MT76_SCANNING, &phy->state))
2042 		return MT_DFS_STATE_DISABLED;
2043 
2044 	if (!phy->radar_enabled) {
2045 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
2046 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
2047 			return MT_DFS_STATE_ACTIVE;
2048 
2049 		return MT_DFS_STATE_DISABLED;
2050 	}
2051 
2052 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
2053 		return MT_DFS_STATE_CAC;
2054 
2055 	return MT_DFS_STATE_ACTIVE;
2056 }
2057 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
2058 
mt76_vif_cleanup(struct mt76_dev * dev,struct ieee80211_vif * vif)2059 void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
2060 {
2061 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
2062 	struct mt76_vif_data *mvif = mlink->mvif;
2063 
2064 	rcu_assign_pointer(mvif->link[0], NULL);
2065 	mt76_abort_scan(dev);
2066 	if (mvif->roc_phy)
2067 		mt76_abort_roc(mvif->roc_phy);
2068 }
2069 EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2070 
mt76_select_links(struct ieee80211_vif * vif,int max_active_links)2071 u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links)
2072 {
2073 	unsigned long usable_links = ieee80211_vif_usable_links(vif);
2074 	struct  {
2075 		u8 link_id;
2076 		enum nl80211_band band;
2077 	} data[IEEE80211_MLD_MAX_NUM_LINKS];
2078 	unsigned int link_id;
2079 	int i, n_data = 0;
2080 	u16 sel_links = 0;
2081 
2082 	if (!ieee80211_vif_is_mld(vif))
2083 		return 0;
2084 
2085 	if (vif->active_links == usable_links)
2086 		return vif->active_links;
2087 
2088 	rcu_read_lock();
2089 	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
2090 		struct ieee80211_bss_conf *link_conf;
2091 
2092 		link_conf = rcu_dereference(vif->link_conf[link_id]);
2093 		if (WARN_ON_ONCE(!link_conf))
2094 			continue;
2095 
2096 		data[n_data].link_id = link_id;
2097 		data[n_data].band = link_conf->chanreq.oper.chan->band;
2098 		n_data++;
2099 	}
2100 	rcu_read_unlock();
2101 
2102 	for (i = 0; i < n_data; i++) {
2103 		int j;
2104 
2105 		if (!(BIT(data[i].link_id) & vif->active_links))
2106 			continue;
2107 
2108 		sel_links = BIT(data[i].link_id);
2109 		for (j = 0; j < n_data; j++) {
2110 			if (data[i].band != data[j].band) {
2111 				sel_links |= BIT(data[j].link_id);
2112 				if (hweight16(sel_links) == max_active_links)
2113 					break;
2114 			}
2115 		}
2116 		break;
2117 	}
2118 
2119 	return sel_links;
2120 }
2121 EXPORT_SYMBOL_GPL(mt76_select_links);
2122