xref: /freebsd/sys/contrib/dev/mediatek/mt76/mac80211.c (revision 8ba4d145d351db26e07695b8e90697398c5dfec2)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #if defined(CONFIG_OF)
7 #include <linux/of.h>
8 #endif
9 #if defined(__FreeBSD__)
10 #include <linux/math64.h>
11 #include <linux/numa.h>
12 #endif
13 #include "mt76.h"
14 
/* Initializers for the static band tables below.  @_idx is the IEEE
 * channel number (also stored as hw_value), @_freq the center
 * frequency in MHz.  max_power is a fixed 30 dBm ceiling here;
 * regulatory limits are applied separately (NOTE(review): confirm). */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_idx),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
35 
/* 2.4 GHz band template: channels 1-13 at 5 MHz spacing plus channel
 * 14 (2484 MHz).  Copied per-device by mt76_init_sband(). */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
52 
/* 5 GHz band template, blank-line grouped by sub-band:
 * channels 36-48, 52-64, 100-144 and 149-177. */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
86 
/* 6 GHz band template: channels 1-233 at 20 MHz spacing, grouped by
 * UNII sub-band. */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
152 
153 #if defined(CONFIG_MT76_LEDS)
/* mac80211 throughput LED trigger table: the blink interval (ms)
 * shrinks as the throughput threshold grows. */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
166 #endif
167 
/* Legacy rate table shared by mt76 drivers: four CCK (802.11b)
 * entries followed by eight OFDM entries.  The first macro argument is
 * the hardware rate code, the second the bitrate in 100 kbit/s units.
 * 5/6 GHz bands are registered with this table offset by 4 entries to
 * drop the CCK rates (see mt76_register_device()). */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
183 
/* Frequency ranges (MHz) over which user space may configure SAR TX
 * power limits; mt76_phy_init() allocates one phy->frp entry per
 * range, indexed identically. */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
197 
/* SAR capabilities advertised to cfg80211: per-range power limits over
 * the frequency ranges defined above. */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
203 
204 #if defined(CONFIG_MT76_LEDS)
mt76_led_init(struct mt76_phy * phy)205 static int mt76_led_init(struct mt76_phy *phy)
206 {
207 	struct mt76_dev *dev = phy->dev;
208 	struct ieee80211_hw *hw = phy->hw;
209 	struct device_node *np = dev->dev->of_node;
210 
211 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
212 		return 0;
213 
214 	np = of_get_child_by_name(np, "led");
215 	if (np) {
216 		if (!of_device_is_available(np)) {
217 			of_node_put(np);
218 			dev_info(dev->dev,
219 				"led registration was explicitly disabled by dts\n");
220 			return 0;
221 		}
222 
223 		if (phy == &dev->phy) {
224 			int led_pin;
225 
226 			if (!of_property_read_u32(np, "led-sources", &led_pin))
227 				phy->leds.pin = led_pin;
228 
229 			phy->leds.al =
230 				of_property_read_bool(np, "led-active-low");
231 		}
232 
233 		of_node_put(np);
234 	}
235 
236 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
237 		 wiphy_name(hw->wiphy));
238 
239 	phy->leds.cdev.name = phy->leds.name;
240 	phy->leds.cdev.default_trigger =
241 		ieee80211_create_tpt_led_trigger(hw,
242 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
243 					mt76_tpt_blink,
244 					ARRAY_SIZE(mt76_tpt_blink));
245 
246 	dev_info(dev->dev,
247 		"registering led '%s'\n", phy->leds.name);
248 
249 	return led_classdev_register(dev->dev, &phy->leds.cdev);
250 }
251 
mt76_led_cleanup(struct mt76_phy * phy)252 static void mt76_led_cleanup(struct mt76_phy *phy)
253 {
254 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
255 		return;
256 
257 	led_classdev_unregister(&phy->leds.cdev);
258 }
259 #endif
260 
/* Program per-spatial-stream HT (and optionally VHT) capabilities
 * derived from the phy's antenna mask: TX STBC only with more than
 * one chain, RX MCS/NSS maps limited to the available streams. */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int nss = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;
	int i;

	if (nss > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = (i < nss) ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nss > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* Two bits per stream: MCS 0-9 on active chains, "not
	 * supported" on the rest. */
	for (i = 0; i < 8; i++) {
		u16 mcs = (i < nss) ? IEEE80211_VHT_MCS_SUPPORT_0_9 :
				      IEEE80211_VHT_MCS_NOT_SUPPORTED;

		mcs_map |= mcs << (i * 2);
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
302 
/* Recompute per-stream HT/VHT capabilities for every supported band,
 * e.g. after the antenna mask changes.  2.4 GHz never gets VHT. */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
313 
/* Fill one struct mt76_sband from a constant channel template.
 * The channel list is duplicated (devm-managed) so it can be modified
 * per device, and a parallel per-channel state array is allocated for
 * survey accounting.  HT/VHT capabilities are applied as requested;
 * @rates is referenced, not copied.  Returns 0 or -ENOMEM. */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Per-stream MCS maps derived from the antenna mask. */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
370 
371 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)372 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
373 		   int n_rates)
374 {
375 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
376 
377 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
378 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
379 			       n_rates, true, false);
380 }
381 
382 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)383 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
384 		   int n_rates, bool vht)
385 {
386 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
387 
388 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
389 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
390 			       n_rates, true, vht);
391 }
392 
393 static int
mt76_init_sband_6g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)394 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
395 		   int n_rates)
396 {
397 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
398 
399 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
400 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
401 			       n_rates, false, false);
402 }
403 
/* After frequency limits have been applied, decide whether a band is
 * usable: if at least one channel is not disabled, set up the default
 * HT20 chandef on the band's first channel and claim the band for this
 * phy; otherwise drop the band from the wiphy entirely. */
static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	/* NOTE(review): sband is the address of an embedded member and
	 * can never be NULL; this check is effectively dead code. */
	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
					NL80211_CHAN_HT20);
		phy->chan_state = &msband->chan[0];
		phy->dev->band_phys[band] = phy;
		return;
	}

	/* No usable channel: hide the band from mac80211. */
	sband->n_channels = 0;
	if (phy->hw->wiphy->bands[band] == sband)
		phy->hw->wiphy->bands[band] = NULL;
}
435 
/* Shared hw/phy initialization for mt76_register_device() and
 * mt76_register_phy().  Per-phy lists and work are always set up; the
 * full wiphy/hw feature configuration runs only for the phy that owns
 * the ieee80211_hw (phy == hw->priv).  Returns 0 or -ENOMEM. */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	/* Secondary radio phys share an already-configured hw. */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* Per-frequency-range SAR power limits; one frp slot per range
	 * advertised in mt76_sar_capa. */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* Software A-MSDU aggregation only when the driver does not
	 * offload it and fragment lists are usable. */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
499 
/* Allocate a secondary (per-radio) phy that shares the primary phy's
 * ieee80211_hw.  @size extra bytes of driver private data follow the
 * (8-byte aligned) mt76_phy and are exposed via phy->priv.  Memory is
 * devm-managed; returns NULL on allocation failure. */
struct mt76_phy *
mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
		     u8 band_idx)
{
	struct ieee80211_hw *hw = dev->phy.hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
	if (!phy)
		return NULL;

	phy->dev = dev;
	phy->hw = hw;
#if defined(__linux__)
	phy->priv = (void *)phy + phy_size;
#elif defined(__FreeBSD__)
	/* No GNU void-pointer arithmetic; go through u8 *. */
	phy->priv = (u8 *)phy + phy_size;
#endif
	phy->band_idx = band_idx;

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
525 
/* Allocate a secondary phy with its own ieee80211_hw.  @size extra
 * bytes of driver private data follow the (8-byte aligned) mt76_phy
 * inside hw->priv and are exposed via phy->priv.  Returns NULL on
 * allocation failure. */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
#if defined(__linux__)
	phy->priv = hw->priv + phy_size;
#elif defined(__FreeBSD__)
	/* No GNU void-pointer arithmetic; go through u8 *. */
	phy->priv = (u8 *)hw->priv + phy_size;
#endif
	phy->band_idx = band_idx;

	/* Same default interface modes as the primary device (see
	 * mt76_alloc_device()). */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
563 
/* Register a phy created by mt76_alloc_phy()/mt76_alloc_radio_phy():
 * initialize it, build its band tables from the shared rate list,
 * optionally set up LEDs, apply DT frequency limits and register the
 * ieee80211_hw if this phy owns it.  Returns 0 or a negative errno. */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* rates + 4 skips the CCK entries (2.4 GHz only). */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

#if defined(CONFIG_MT76_LEDS)
	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}
#endif

	/* Apply DT frequency limits, then drop bands left without any
	 * usable channel. */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* Radio phys share the primary hw, which is registered
	 * elsewhere; only a phy owning its hw registers it here. */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
616 
/* Undo mt76_register_phy(): clean up LEDs, flush TX status, unregister
 * the hw and drop the phy from the device's phy table.  No-op if the
 * phy never completed registration. */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

#if defined(CONFIG_MT76_LEDS)
	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
#endif
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
633 
/* Create a page pool for an RX (or WED TX-free) queue.  Main RX data
 * queues get a deeper pool; on MMIO devices the pool also performs DMA
 * mapping/syncing and is tied to the queue's NAPI context.  Returns 0
 * or a negative errno; q->page_pool is left NULL on failure. */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* Index into dev->q_rx, or -1 for non-RX (WED TX-free) queues. */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
682 
/* Allocate and minimally initialize a primary mt76 device together
 * with its ieee80211_hw and embedded primary phy.  @size extra bytes
 * of driver private data follow the mt76_dev in hw->priv.  Returns
 * NULL if the hw or the ordered workqueue cannot be allocated. */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* DMA defaults to the same device; buses may override later. */
	dev->dma_dev = pdev;

	/* The primary phy is embedded in mt76_dev and owns band 0. */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
760 
/* Register the primary device with mac80211: initialize the global
 * WCID and phy, build band tables from the shared rate list, apply DT
 * frequency limits, optionally set up LEDs, then register the hw and
 * start the TX worker.  Returns 0 or a negative errno. */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* rates + 4 skips the CCK entries (2.4 GHz only). */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop bands left without any
	 * usable channel. */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

#if defined(CONFIG_MT76_LEDS)
	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}
#endif

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	/* Run the TX worker at low realtime (SCHED_FIFO) priority. */
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
816 
/* Tear down a registered primary device: clean up LEDs, flush TX
 * status, release the global WCID and unregister the ieee80211_hw.
 * Safe to call when registration never completed. */
void mt76_unregister_device(struct mt76_dev *dev)
{
#if defined(__linux__)
	struct ieee80211_hw *hw = dev->hw;
#endif

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

#if defined(CONFIG_MT76_LEDS)
	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
#endif
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
#if defined(__linux__)
	ieee80211_unregister_hw(hw);
#elif defined(__FreeBSD__)
	/* FreeBSD variant reads dev->hw directly, avoiding the local
	 * above. */
	ieee80211_unregister_hw(dev->hw);
#endif
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
839 
mt76_free_device(struct mt76_dev * dev)840 void mt76_free_device(struct mt76_dev *dev)
841 {
842 	mt76_worker_teardown(&dev->tx_worker);
843 	if (dev->wq) {
844 		destroy_workqueue(dev->wq);
845 		dev->wq = NULL;
846 	}
847 	ieee80211_free_hw(dev->hw);
848 }
849 EXPORT_SYMBOL_GPL(mt76_free_device);
850 
851 static struct mt76_phy *
mt76_vif_phy(struct ieee80211_hw * hw,struct ieee80211_vif * vif)852 mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
853 {
854 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
855 	struct mt76_chanctx *ctx;
856 
857 	if (!hw->wiphy->n_radio)
858 		return hw->priv;
859 
860 	if (!mlink->ctx)
861 		return NULL;
862 
863 	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
864 	return ctx->phy;
865 }
866 
/* Flush the pending A-MSDU burst on RX queue @q: detach it from the
 * reassembly slot, sanity-check the first subframe, then either free
 * the frame or append it to the queue's RX skb list. */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* Skip the 8-byte security (IV) header left in
			 * place when decrypted but not IV-stripped. */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be: spoofed A-MSDU, drop. */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
902 
/* Accumulate A-MSDU subframes for RX queue @q into one skb via its
 * frag_list.  Any frame that cannot extend the pending burst (not an
 * A-MSDU, a new first subframe, or a sequence-number change) flushes
 * it first; the burst is released when its last subframe arrives. */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new burst; tail points at the head skb's
		 * frag_list slot so later subframes chain onto it. */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
925 
/* Driver entry point for received frames: drop frames while the phy is
 * not running, account testmode RX statistics, then feed the frame
 * into per-queue A-MSDU burst reassembly. */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
947 
mt76_has_tx_pending(struct mt76_phy * phy)948 bool mt76_has_tx_pending(struct mt76_phy *phy)
949 {
950 	struct mt76_queue *q;
951 	int i;
952 
953 	for (i = 0; i < __MT_TXQ_MAX; i++) {
954 		q = phy->q_tx[i];
955 		if (q && q->queued)
956 			return true;
957 	}
958 
959 	return false;
960 }
961 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
962 
963 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)964 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
965 {
966 	struct mt76_sband *msband;
967 	int idx;
968 
969 	if (c->band == NL80211_BAND_2GHZ)
970 		msband = &phy->sband_2g;
971 	else if (c->band == NL80211_BAND_6GHZ)
972 		msband = &phy->sband_6g;
973 	else
974 		msband = &phy->sband_5g;
975 
976 	idx = c - &msband->sband.channels[0];
977 	return &msband->chan[idx];
978 }
979 
/* Credit the wall time elapsed since the previous survey update to the
 * current channel's active-time counter, then restart the interval. */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;
	ktime_t delta = ktime_sub(time, phy->survey_time);

	state->cc_active += ktime_to_us(delta);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
989 
/* Bring the current channel's survey counters up to date: let the
 * driver refresh its hardware counters, account wall-clock active
 * time, and (for drivers with software RX airtime accounting) fold the
 * accumulated BSS RX airtime into the channel state. */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock protects the airtime accumulators. */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
1011 
/* Core channel-switch implementation; the caller must hold dev->mutex
 * (see mt76_set_channel()).  Quiesces TX, closes out survey accounting
 * on the old channel, updates chandef/off-channel bookkeeping and
 * invokes the driver's set_channel hook, whose status is returned. */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	set_bit(MT76_RESET, &phy->state);

	/* Stop the TX scheduler and give pending frames up to 200 ms
	 * to drain before switching. */
	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* A frequency or width change invalidates the DFS state. */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chandef = *chandef;

	/* Off-channel visits must not pollute another channel's stats. */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
1047 
/* Public channel-switch entry point: cancel the periodic MAC work,
 * then run __mt76_set_channel() under dev->mutex. */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int ret;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	ret = __mt76_set_channel(phy, chandef, offchannel);
	mutex_unlock(&dev->mutex);

	return ret;
}
1062 
mt76_update_channel(struct mt76_phy * phy)1063 int mt76_update_channel(struct mt76_phy *phy)
1064 {
1065 	struct ieee80211_hw *hw = phy->hw;
1066 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1067 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1068 
1069 	phy->radar_enabled = hw->conf.radar_enabled;
1070 
1071 	return mt76_set_channel(phy, chandef, offchannel);
1072 }
1073 EXPORT_SYMBOL_GPL(mt76_update_channel);
1074 
1075 static struct mt76_sband *
mt76_get_survey_sband(struct mt76_phy * phy,int * idx)1076 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1077 {
1078 	if (*idx < phy->sband_2g.sband.n_channels)
1079 		return &phy->sband_2g;
1080 
1081 	*idx -= phy->sband_2g.sband.n_channels;
1082 	if (*idx < phy->sband_5g.sband.n_channels)
1083 		return &phy->sband_5g;
1084 
1085 	*idx -= phy->sband_5g.sband.n_channels;
1086 	if (*idx < phy->sband_6g.sband.n_channels)
1087 		return &phy->sband_6g;
1088 
1089 	*idx -= phy->sband_6g.sband.n_channels;
1090 	return NULL;
1091 }
1092 
/* ieee80211_ops::get_survey implementation. Channel index @idx spans the
 * 2G/5G/6G bands of every phy backing this hw; returns -ENOENT once @idx
 * runs past the last channel so mac80211 stops iterating.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband = NULL;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int phy_idx = 0;
	int ret = 0;

	mutex_lock(&dev->mutex);

	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
		sband = NULL;
		phy = dev->phys[phy_idx];
		if (!phy || phy->hw != hw)
			continue;

		/* rebases idx to be relative to the matched band */
		sband = mt76_get_survey_sband(phy, &idx);

		/* refresh counters once per phy, at its first channel */
		if (idx == 0 && phy->dev->drv->update_survey)
			mt76_update_survey(phy);

		/* without per-radio reporting, only the first phy counts */
		if (sband || !hw->wiphy->n_radio)
			break;
	}

	if (!sband) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* scale the raw cc_* counters down by 1000 for mac80211 */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx and cc_tx are updated elsewhere under cc_lock */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1159 
mt76_wcid_key_setup(struct mt76_dev * dev,struct mt76_wcid * wcid,struct ieee80211_key_conf * key)1160 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1161 			 struct ieee80211_key_conf *key)
1162 {
1163 	struct ieee80211_key_seq seq;
1164 	int i;
1165 
1166 	wcid->rx_check_pn = false;
1167 
1168 	if (!key)
1169 		return;
1170 
1171 	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1172 		return;
1173 
1174 	wcid->rx_check_pn = true;
1175 
1176 	/* data frame */
1177 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1178 		ieee80211_get_key_rx_seq(key, i, &seq);
1179 		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1180 	}
1181 
1182 	/* robust management frame */
1183 	ieee80211_get_key_rx_seq(key, -1, &seq);
1184 	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1185 
1186 }
1187 EXPORT_SYMBOL(mt76_wcid_key_setup);
1188 
/* Combine per-chain RSSI values (dBm, <= 0 when valid) into a single
 * signal estimate. Chains not present in @chain_mask, or with positive
 * (invalid) readings, are ignored. Returns -128 when nothing was usable.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int combined = -128;
	u8 mask;

	for (mask = chain_mask; mask; mask >>= 1, chain_signal++) {
		int val = *chain_signal;
		int gap;

		if (!(mask & BIT(0)) || val > 0)
			continue;

		/* keep 'combined' holding the stronger of the two */
		if (val > combined)
			swap(val, combined);

		/* add a bonus that shrinks as the weaker chain falls off */
		gap = combined - val;
		if (gap == 0)
			combined += 3;
		else if (gap <= 2)
			combined += 2;
		else if (gap <= 6)
			combined += 1;
	}

	return combined;
}
EXPORT_SYMBOL(mt76_rx_signal);
1217 
1218 static void
mt76_rx_convert(struct mt76_dev * dev,struct sk_buff * skb,struct ieee80211_hw ** hw,struct ieee80211_sta ** sta)1219 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1220 		struct ieee80211_hw **hw,
1221 		struct ieee80211_sta **sta)
1222 {
1223 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1224 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1225 	struct mt76_rx_status mstat;
1226 
1227 	mstat = *((struct mt76_rx_status *)skb->cb);
1228 	memset(status, 0, sizeof(*status));
1229 
1230 	status->flag = mstat.flag;
1231 	status->freq = mstat.freq;
1232 	status->enc_flags = mstat.enc_flags;
1233 	status->encoding = mstat.encoding;
1234 	status->bw = mstat.bw;
1235 	if (status->encoding == RX_ENC_EHT) {
1236 		status->eht.ru = mstat.eht.ru;
1237 		status->eht.gi = mstat.eht.gi;
1238 	} else {
1239 		status->he_ru = mstat.he_ru;
1240 		status->he_gi = mstat.he_gi;
1241 		status->he_dcm = mstat.he_dcm;
1242 	}
1243 	status->rate_idx = mstat.rate_idx;
1244 	status->nss = mstat.nss;
1245 	status->band = mstat.band;
1246 	status->signal = mstat.signal;
1247 	status->chains = mstat.chains;
1248 	status->ampdu_reference = mstat.ampdu_ref;
1249 	status->device_timestamp = mstat.timestamp;
1250 	status->mactime = mstat.timestamp;
1251 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1252 	if (status->signal <= -128)
1253 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1254 
1255 	if (ieee80211_is_beacon(hdr->frame_control) ||
1256 	    ieee80211_is_probe_resp(hdr->frame_control))
1257 		status->boottime_ns = ktime_get_boottime_ns();
1258 
1259 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1260 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1261 		     sizeof(mstat.chain_signal));
1262 	memcpy(status->chain_signal, mstat.chain_signal,
1263 	       sizeof(mstat.chain_signal));
1264 
1265 	if (mstat.wcid) {
1266 		status->link_valid = mstat.wcid->link_valid;
1267 		status->link_id = mstat.wcid->link_id;
1268 	}
1269 
1270 	*sta = wcid_to_sta(mstat.wcid);
1271 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1272 }
1273 
/* Software CCMP PN replay check for decrypted rx frames. Frames whose PN
 * does not strictly increase relative to the stored counter are flagged
 * RX_FLAG_ONLY_MONITOR so mac80211 drops them from the data path.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* default replay counter: one per TID */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* big-endian byte order makes memcmp a valid PN comparison */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		/* PN did not increase: treat as replay, monitor-only */
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1331 
/* Compute the rx airtime of a frame (or aggregate) from its rate info and
 * charge it to the channel's BSS-rx counter and, when known, to the
 * originating station's airtime fairness accounting.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	/* minimal rx-status copy: just the fields the airtime calc reads */
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	/* cc_lock guards cur_cc_bss_rx against the survey reader */
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	/* without an associated station there is nobody to charge */
	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
1360 
1361 static void
mt76_airtime_flush_ampdu(struct mt76_dev * dev)1362 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1363 {
1364 	struct mt76_wcid *wcid;
1365 	int wcid_idx;
1366 
1367 	if (!dev->rx_ampdu_len)
1368 		return;
1369 
1370 	wcid_idx = dev->rx_ampdu_status.wcid_idx;
1371 	if (wcid_idx < ARRAY_SIZE(dev->wcid))
1372 		wcid = rcu_dereference(dev->wcid[wcid_idx]);
1373 	else
1374 		wcid = NULL;
1375 	dev->rx_ampdu_status.wcid = wcid;
1376 
1377 	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1378 
1379 	dev->rx_ampdu_len = 0;
1380 	dev->rx_ampdu_ref = 0;
1381 }
1382 
/* Per-frame hook for software rx airtime accounting. Frames belonging to
 * the same A-MPDU are accumulated by length and reported once, when a frame
 * from a different aggregate (or a non-aggregated frame) arrives.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		/* no 802.11 header to inspect in 802.3 decap mode */
		if (status->flag & RX_FLAG_8023)
			return;

		/* only count unassociated traffic addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a different (or no) aggregate ends the previous one */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		/* first subframe of a new aggregate: snapshot its status */
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	/* non-aggregated frame: report immediately */
	mt76_airtime_report(dev, status, skb->len);
}
1422 
/* Per-frame station bookkeeping on rx: resolve the wcid for PS-Poll frames,
 * feed airtime accounting and RSSI averaging, and track the peer's
 * power-save state transitions for drivers that need them.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* hardware may not resolve the sender of PS-Poll frames; look the
	 * station up by transmitter address instead */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* store as positive value for the ewma average */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	/* the PS checks below need 802.11 header fields */
	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames carry a trustworthy PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no state change */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* deliberate ordering: set the PS flag before notifying the driver
	 * when entering PS, clear it after when leaving */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1490 
/* Hand a batch of reordered rx frames to mac80211. A-MSDU subframes are
 * chained on the head skb's frag_list and unlinked here so each subframe is
 * delivered individually. With a NAPI context, frames go through GRO.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
#if defined(__linux__)
	LIST_HEAD(list);
#elif defined(__FreeBSD__)
	LINUX_LIST_HEAD(list);
#endif

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* PN check only on the head frame; subframes share its PN */
		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	/* no NAPI context: deliver directly to the network stack */
	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1534 
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1535 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1536 			   struct napi_struct *napi)
1537 {
1538 	struct sk_buff_head frames;
1539 	struct sk_buff *skb;
1540 
1541 	__skb_queue_head_init(&frames);
1542 
1543 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1544 		mt76_check_sta(dev, skb);
1545 		if (mtk_wed_device_active(&dev->mmio.wed))
1546 			__skb_queue_tail(&frames, skb);
1547 		else
1548 			mt76_rx_aggr_reorder(skb, &frames);
1549 	}
1550 
1551 	mt76_rx_complete(dev, &frames, napi);
1552 }
1553 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1554 
/* Create the driver-side station entry and publish its wcid. Called from
 * mt76_sta_state() on the NOTEXIST -> NONE transition.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* bind each of the station's txqs to its wcid index */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	/* publish last, once the entry is fully set up, so RCU readers in
	 * the rx path never see a half-initialized wcid */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1590 
/* Tear down a station entry: stop rx aggregation sessions, notify the
 * driver, free queued tx frames and release the wcid index. Caller must
 * hold dev->mutex.
 */
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1610 
/* Locked wrapper around __mt76_sta_remove(). */
static void
mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	__mt76_sta_remove(phy, vif, sta);
	mutex_unlock(&dev->mutex);
}
1621 
mt76_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)1622 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1623 		   struct ieee80211_sta *sta,
1624 		   enum ieee80211_sta_state old_state,
1625 		   enum ieee80211_sta_state new_state)
1626 {
1627 	struct mt76_phy *phy = hw->priv;
1628 	struct mt76_dev *dev = phy->dev;
1629 	enum mt76_sta_event ev;
1630 
1631 	phy = mt76_vif_phy(hw, vif);
1632 	if (!phy)
1633 		return -EINVAL;
1634 
1635 	if (old_state == IEEE80211_STA_NOTEXIST &&
1636 	    new_state == IEEE80211_STA_NONE)
1637 		return mt76_sta_add(phy, vif, sta);
1638 
1639 	if (old_state == IEEE80211_STA_NONE &&
1640 	    new_state == IEEE80211_STA_NOTEXIST)
1641 		mt76_sta_remove(phy, vif, sta);
1642 
1643 	if (!dev->drv->sta_event)
1644 		return 0;
1645 
1646 	if (old_state == IEEE80211_STA_AUTH &&
1647 	    new_state == IEEE80211_STA_ASSOC)
1648 		ev = MT76_STA_EVENT_ASSOC;
1649 	else if (old_state == IEEE80211_STA_ASSOC &&
1650 		 new_state == IEEE80211_STA_AUTHORIZED)
1651 		ev = MT76_STA_EVENT_AUTHORIZE;
1652 	else if (old_state == IEEE80211_STA_ASSOC &&
1653 		 new_state == IEEE80211_STA_AUTH)
1654 		ev = MT76_STA_EVENT_DISASSOC;
1655 	else
1656 		return 0;
1657 
1658 	return dev->drv->sta_event(dev, vif, sta, ev);
1659 }
1660 EXPORT_SYMBOL_GPL(mt76_sta_state);
1661 
/* Unpublish the station's wcid before mac80211 frees it. Taking
 * status_lock inside dev->mutex ensures the tx status path cannot pick up
 * the pointer while it is being cleared.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1676 
/* Initialize the list heads, queues and bookkeeping of a freshly created
 * wcid entry and bind it to phy/band @band_idx.
 */
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
	wcid->hw_key_idx = -1;	/* no hardware key slot assigned yet */
	wcid->phy_idx = band_idx;

	INIT_LIST_HEAD(&wcid->tx_list);
	INIT_LIST_HEAD(&wcid->list);
	INIT_LIST_HEAD(&wcid->poll_list);

	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);

	idr_init(&wcid->pktid);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1692 
/* Release everything a wcid still holds on teardown: outstanding tx status
 * entries, its slot on the phy tx scheduling list, and any frames still
 * queued for transmission (which are returned to mac80211 for freeing).
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* collect pending tx status frames for this wcid */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* queue lock nests inside phy->tx_lock */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	/* free outside the locks */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1723 
/* Queue @wcid for station stats polling, unless an MCU reset is in
 * progress. The list_empty() check keeps the entry from being added twice.
 */
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1735 
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,unsigned int link_id,int * dbm)1736 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1737 		     unsigned int link_id, int *dbm)
1738 {
1739 	struct mt76_phy *phy = mt76_vif_phy(hw, vif);
1740 	int n_chains, delta;
1741 
1742 	if (!phy)
1743 		return -EINVAL;
1744 
1745 	n_chains = hweight16(phy->chainmask);
1746 	delta = mt76_tx_power_nss_delta(n_chains);
1747 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1748 
1749 	return 0;
1750 }
1751 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1752 
/* cfg80211 set_sar_specs handler: record the per-frequency-range SAR power
 * limits for later lookup by mt76_get_sar_power(). Only power-type specs
 * with at least one sub-spec are accepted.
 */
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		/* NOTE(review): index is used unchecked against
		 * capa->num_freq_ranges here — presumably validated by
		 * cfg80211 before this hook runs; confirm against callers */
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limitation in 0.25 dBm steps */
		s32 power = sar->sub_specs[i].power >> 1;

		/* clamp out-of-range values to the maximum */
		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1778 
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1779 int mt76_get_sar_power(struct mt76_phy *phy,
1780 		       struct ieee80211_channel *chan,
1781 		       int power)
1782 {
1783 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1784 	int freq, i;
1785 
1786 	if (!capa || !phy->frp)
1787 		return power;
1788 
1789 	if (power > 127 || power < -127)
1790 		power = 127;
1791 
1792 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1793 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1794 		if (phy->frp[i].range &&
1795 		    freq >= phy->frp[i].range->start_freq &&
1796 		    freq < phy->frp[i].range->end_freq) {
1797 			power = min_t(int, phy->frp[i].power, power);
1798 			break;
1799 		}
1800 	}
1801 
1802 	return power;
1803 }
1804 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1805 
/* Interface iterator callback: complete the channel switch announcement on
 * every vif whose beacon countdown has reached zero.
 */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
		ieee80211_csa_finish(vif, 0);
}
1812 
/* Finalize a pending channel switch (flagged by mt76_csa_check()) on all
 * active interfaces and clear the pending marker.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1825 
/* Interface iterator callback: record in dev->csa_complete whether any vif
 * with an active CSA has finished its beacon countdown.
 */
static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->bss_conf.csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
1836 
/* Poll all active interfaces for a completed CSA beacon countdown; result
 * is latched in dev->csa_complete for mt76_csa_finish().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1844 
/* ieee80211_ops::set_tim stub: the TIM element is rebuilt by the beacon
 * update path, so nothing needs to happen here; report success.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1851 
/* Re-insert the 8-byte CCMP header that the hardware stripped on rx: make
 * room by pushing the frame back 8 bytes, move the 802.11 header down, and
 * rebuild the CCMP header from the PN saved in status->iv.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0 PN1 rsvd keyid/ExtIV PN2 PN3 PN4 PN5;
	 * status->iv stores the PN most-significant byte first */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);	/* ExtIV bit + key index */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	/* the IV is present again; tell mac80211 */
	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1874 
/* Translate a hardware rate index into the matching position in @sband's
 * bitrate table. Falls back to index 0 when nothing matches (or when a CCK
 * rate is requested outside 2.4 GHz).
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	bool is_2g = sband->band == NL80211_BAND_2GHZ;
	int i, start = 0;

	if (cck) {
		/* CCK rates exist only on 2.4 GHz */
		if (!is_2g)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (is_2g) {
		/* skip the four CCK entries heading the 2.4 GHz table */
		start = 4;
	}

	for (i = start; i < sband->n_bitrates; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1899 
/* ieee80211_ops::sw_scan_start implementation: mark the phy as scanning so
 * other paths (e.g. DFS state) can adjust their behavior.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1908 
/* ieee80211_ops::sw_scan_complete implementation: clear the scanning flag
 * set by mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1916 
/* ieee80211_ops::get_antenna implementation: report the union of the
 * chainmasks of every phy backing this hw; rx mirrors tx.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	u32 mask = 0;
	int i;

	mutex_lock(&dev->mutex);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *cur = dev->phys[i];

		if (cur && cur->hw == hw)
			mask |= cur->chainmask;
	}
	mutex_unlock(&dev->mutex);

	*tx_ant = mask;
	*rx_ant = mask;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1934 
1935 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base,void * wed,u32 flags)1936 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1937 		int ring_base, void *wed, u32 flags)
1938 {
1939 	struct mt76_queue *hwq;
1940 	int err;
1941 
1942 	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1943 	if (!hwq)
1944 		return ERR_PTR(-ENOMEM);
1945 
1946 	hwq->flags = flags;
1947 	hwq->wed = wed;
1948 
1949 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1950 	if (err < 0)
1951 		return ERR_PTR(err);
1952 
1953 	return hwq;
1954 }
1955 EXPORT_SYMBOL_GPL(mt76_init_queue);
1956 
/* Ethtool stats iterator body: accumulate one station's tx counters into
 * the shared data array starting at wi->initial_stat_idx. @eht selects the
 * wider layout used by EHT-capable chips. The number of slots written is
 * recorded in wi->worker_stat_count and must match the stat-name table.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* non-EHT chips lack the last (widest) bandwidth bucket */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 14 MCS buckets with EHT, 12 otherwise */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1992 
/* Aggregate page-pool statistics from all rx queues into the ethtool data
 * array and advance *index past the emitted entries. Compiles to a no-op
 * when the kernel lacks CONFIG_PAGE_POOL_STATS.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
2007 
/* Derive the phy's current DFS state from the regulatory region, scan
 * status, radar-detection setting and channel flags.
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	/* no DFS rules configured, or scanning: detection is off */
	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!phy->radar_enabled) {
		/* monitor mode on a radar channel still behaves as active */
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	/* beaconing not yet permitted => still in the CAC phase */
	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
2031 
/* Tear down per-vif state on interface removal: unpublish the primary link
 * and abort any scan or remain-on-channel operation still running on it.
 */
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	rcu_assign_pointer(mvif->link[0], NULL);
	mt76_abort_scan(dev);
	if (mvif->roc_phy)
		mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2043