1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4 */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8
/* Channel table entry initializers, one per band.
 * @_idx:  IEEE channel number (stored as hw_value)
 * @_freq: center frequency in MHz
 * max_power is a fixed 30 dBm default cap for every entry.
 */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN6G(_idx, _freq) { \
	.band = NL80211_BAND_6GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
29
/* 2.4 GHz channel template (channels 1-14); duplicated per-phy by
 * mt76_init_sband()
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46
/* 5 GHz channel template, grouped by UNII sub-band */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80
/* 6 GHz channel template (channels 1-233, 20 MHz spacing) */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146
/* LED blink intervals keyed by throughput (units match the mac80211
 * tpt trigger API); higher throughput -> faster blinking
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
159
/* Legacy rate table: 4 CCK rates followed by 8 OFDM rates. Rate values
 * are in units of 100 kbps; the first argument is the hardware rate
 * index (note: OFDM hw indices are not monotonic in bitrate).
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175
/* SAR power-limit frequency ranges (MHz) advertised to cfg80211;
 * mt76_phy_init() allocates one mt76_freq_range_power entry per range
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189
/* SAR capability descriptor installed on every wiphy in mt76_phy_init() */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195
mt76_led_init(struct mt76_phy * phy)196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 struct mt76_dev *dev = phy->dev;
199 struct ieee80211_hw *hw = phy->hw;
200 struct device_node *np = dev->dev->of_node;
201
202 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 return 0;
204
205 np = of_get_child_by_name(np, "led");
206 if (np) {
207 if (!of_device_is_available(np)) {
208 of_node_put(np);
209 dev_info(dev->dev,
210 "led registration was explicitly disabled by dts\n");
211 return 0;
212 }
213
214 if (phy == &dev->phy) {
215 int led_pin;
216
217 if (!of_property_read_u32(np, "led-sources", &led_pin))
218 phy->leds.pin = led_pin;
219
220 phy->leds.al =
221 of_property_read_bool(np, "led-active-low");
222 }
223
224 of_node_put(np);
225 }
226
227 snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 wiphy_name(hw->wiphy));
229
230 phy->leds.cdev.name = phy->leds.name;
231 phy->leds.cdev.default_trigger =
232 ieee80211_create_tpt_led_trigger(hw,
233 IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 mt76_tpt_blink,
235 ARRAY_SIZE(mt76_tpt_blink));
236
237 dev_info(dev->dev,
238 "registering led '%s'\n", phy->leds.name);
239
240 return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242
mt76_led_cleanup(struct mt76_phy * phy)243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 return;
247
248 led_classdev_unregister(&phy->leds.cdev);
249 }
250
/* Derive per-stream HT (and optionally VHT) capabilities from the
 * phy's antenna mask: TX STBC is only advertised with more than one
 * spatial stream, and the MCS maps cover exactly @nstream streams.
 * Called again from mt76_set_stream_caps() whenever the antenna mask
 * changes.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* One 0xff rx_mask byte (MCS 0-7) per supported stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* 2 bits per stream: MCS 0-9 for active streams, the rest
	 * marked not supported
	 */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292
/* Refresh the stream-dependent HT/VHT capabilities on every band the
 * phy supports; VHT only applies to the 5/6 GHz bands
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	struct {
		bool supported;
		struct ieee80211_supported_band *sband;
		bool vht;
	} bands[] = {
		{ phy->cap.has_2ghz, &phy->sband_2g.sband, false },
		{ phy->cap.has_5ghz, &phy->sband_5g.sband, vht },
		{ phy->cap.has_6ghz, &phy->sband_6g.sband, vht },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(bands); i++) {
		if (bands[i].supported)
			mt76_init_stream_cap(phy, bands[i].sband,
					     bands[i].vht);
	}
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303
/* Populate one ieee80211_supported_band: duplicate the const channel
 * template into writable (devm-managed) memory, allocate per-channel
 * survey state, and fill in default HT/VHT capabilities on request.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* Writable copy of the const channel template */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Antenna-mask dependent bits (STBC, MCS maps) */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360
361 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 int n_rates)
364 {
365 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366
367 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 ARRAY_SIZE(mt76_channels_2ghz), rates,
369 n_rates, true, false);
370 }
371
372 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 int n_rates, bool vht)
375 {
376 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377
378 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 ARRAY_SIZE(mt76_channels_5ghz), rates,
380 n_rates, true, vht);
381 }
382
383 static int
mt76_init_sband_6g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 int n_rates)
386 {
387 phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388
389 return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 ARRAY_SIZE(mt76_channels_6ghz), rates,
391 n_rates, false, false);
392 }
393
/* After wiphy_read_of_freq_limits() has possibly disabled channels,
 * either adopt this band (default chandef on its first enabled
 * channel) or remove it from the wiphy when every channel is disabled.
 */
static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	/* NOTE(review): sband points into msband and can never be NULL
	 * here; this check looks vestigial
	 */
	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		/* Default to HT20 on the band's first channel */
		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
					NL80211_CHAN_HT20);
		phy->chan_state = &msband->chan[0];
		phy->dev->band_phys[band] = phy;
		return;
	}

	/* All channels disabled: drop the band from the wiphy */
	sband->n_channels = 0;
	if (phy->hw->wiphy->bands[band] == sband)
		phy->hw->wiphy->bands[band] = NULL;
}
425
/* Common phy/hw initialization shared by mt76_register_device() and
 * mt76_register_phy(): per-phy state first, then - only for the phy
 * that owns the ieee80211_hw - wiphy flags, SAR capabilities and
 * mac80211 hw flags.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	/* Secondary radio phys share the hw; only the owning phy
	 * configures the wiphy/hw below
	 */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* Keep any driver-provided antenna masks */
	if (!wiphy->available_antennas_tx)
		wiphy->available_antennas_tx = phy->antenna_mask;
	if (!wiphy->available_antennas_rx)
		wiphy->available_antennas_rx = phy->antenna_mask;

	/* One power-limit slot per advertised SAR frequency range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* Software A-MSDU TX only when the driver does not offload it
	 * and fragment lists are usable
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
491
492 struct mt76_phy *
mt76_alloc_radio_phy(struct mt76_dev * dev,unsigned int size,u8 band_idx)493 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
494 u8 band_idx)
495 {
496 struct ieee80211_hw *hw = dev->phy.hw;
497 unsigned int phy_size;
498 struct mt76_phy *phy;
499
500 phy_size = ALIGN(sizeof(*phy), 8);
501 phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
502 if (!phy)
503 return NULL;
504
505 phy->dev = dev;
506 phy->hw = hw;
507 phy->priv = (void *)phy + phy_size;
508 phy->band_idx = band_idx;
509
510 return phy;
511 }
512 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
513
/* Allocate an extra phy with its own ieee80211_hw (plus @size bytes of
 * driver private data, 8-byte aligned) and set up default interface
 * modes. Returns NULL on allocation failure.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	/* Driver private area follows the (aligned) phy struct */
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	/* Same defaults as mt76_alloc_device() - keep the two in sync */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
547
/* Register an extra phy allocated via mt76_alloc_phy() /
 * mt76_alloc_radio_phy(): initialize its bands, LEDs and - when the
 * phy owns its own ieee80211_hw - register that hw with mac80211.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	/* rates + 4 skips the 4 leading CCK entries (5/6 GHz is
	 * OFDM-only)
	 */
	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop fully-disabled bands */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* Only the phy owning the hw registers it with mac80211 */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
598
/* Tear down a phy registered via mt76_register_phy(): clean up LEDs,
 * flush pending TX status, unregister the hw and drop the phys[] slot.
 * Safe to call on a phy that was never registered.
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* Flush all pending TX status before the hw goes away */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
613
/* Create a page pool for @q. Pools are only needed for RX queues and
 * the WED TX-free queue; other queues return 0 without allocating.
 * MMIO devices let the pool handle DMA mapping/sync and attach the
 * matching NAPI context for RX queues.
 *
 * Returns 0 on success or the page_pool_create() error.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* RX queue index within q_rx[], or -1 for the wed_tx_free queue */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	/* Data-path RX queues get a larger pool than the rest */
	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* Don't leave an ERR_PTR behind for later cleanup paths */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
662
/* Allocate and initialize the core mt76_dev together with its primary
 * phy (MT_BAND0) and ieee80211_hw. @size covers the driver's private
 * data appended to the hw. Sets up all locks, lists, work items and
 * the ordered workqueue; returns NULL on failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* Drivers may later point dma_dev at a different device */
	dev->dma_dev = pdev;

	/* The primary phy lives embedded in the device struct */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	/* Same defaults as mt76_alloc_phy() - keep the two in sync */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
740
/* Register the primary phy/hw with mac80211: initialize supported
 * bands, apply DT frequency limits, set up LEDs, register the hw and
 * start the TX worker.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	/* rates + 4 skips the 4 leading CCK entries (5/6 GHz is
	 * OFDM-only)
	 */
	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop fully-disabled bands */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
794
/* Undo mt76_register_device(): LED cleanup, TX status flush, global
 * wcid cleanup, then unregister the hw. Safe to call if registration
 * never completed.
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* Flush pending TX status before tearing anything down */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
809
mt76_free_device(struct mt76_dev * dev)810 void mt76_free_device(struct mt76_dev *dev)
811 {
812 mt76_worker_teardown(&dev->tx_worker);
813 if (dev->wq) {
814 destroy_workqueue(dev->wq);
815 dev->wq = NULL;
816 }
817 ieee80211_free_hw(dev->hw);
818 }
819 EXPORT_SYMBOL_GPL(mt76_free_device);
820
mt76_vif_phy(struct ieee80211_hw * hw,struct ieee80211_vif * vif)821 struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
822 struct ieee80211_vif *vif)
823 {
824 struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
825 struct mt76_chanctx *ctx;
826
827 if (!hw->wiphy->n_radio)
828 return hw->priv;
829
830 if (!mlink->ctx)
831 return NULL;
832
833 ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
834 return ctx->phy;
835 }
836 EXPORT_SYMBOL_GPL(mt76_vif_phy);
837
/* Complete the A-MSDU currently being assembled for queue @q: detach
 * it from the per-queue head/tail state and either queue it for
 * mac80211 processing or drop it when first-subframe validation fails.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* Skip the 8-byte IV/CCMP header still present
			 * when decrypted but not IV-stripped
			 */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be -> forged A-MSDU, drop */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
873
/* Aggregate consecutive A-MSDU subframes into a single skb (head plus
 * frag_list) and release the assembled frame once a subframe from a
 * different burst arrives or the last subframe is seen. Non-A-MSDU
 * frames are released immediately.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* A pending A-MSDU that this skb does not continue must be
	 * flushed first
	 */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new aggregate; tail points at the next link slot */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* Append this subframe to the head skb's frag list */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
896
/* RX entry point called by the bus/DMA layers with a fully parsed
 * frame (mt76_rx_status in skb->cb). Drops frames for phys that are
 * not running, accounts testmode RX statistics, and feeds the frame
 * into per-queue A-MSDU reassembly.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
918
mt76_has_tx_pending(struct mt76_phy * phy)919 bool mt76_has_tx_pending(struct mt76_phy *phy)
920 {
921 struct mt76_queue *q;
922 int i;
923
924 for (i = 0; i < __MT_TXQ_MAX; i++) {
925 q = phy->q_tx[i];
926 if (q && q->queued)
927 return true;
928 }
929
930 return false;
931 }
932 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
933
934 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)935 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
936 {
937 struct mt76_sband *msband;
938 int idx;
939
940 if (c->band == NL80211_BAND_2GHZ)
941 msband = &phy->sband_2g;
942 else if (c->band == NL80211_BAND_6GHZ)
943 msband = &phy->sband_6g;
944 else
945 msband = &phy->sband_5g;
946
947 idx = c - &msband->sband.channels[0];
948 return &msband->chan[idx];
949 }
950
/* Credit the time elapsed since the last survey snapshot to the
 * current channel's active time, then reset the snapshot to @time
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	ktime_t elapsed = ktime_sub(time, phy->survey_time);

	phy->chan_state->cc_active += ktime_to_us(elapsed);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
960
/* Refresh the current channel's survey statistics: let the driver
 * update its hardware counters, account active time, and fold in the
 * software-tracked BSS RX airtime when the driver uses it.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock guards cur_cc_bss_rx accumulation */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
982
/* Switch @phy to @chandef. Quiesces TX (worker disabled, bounded wait
 * for queues to drain), snapshots survey data, updates the phy channel
 * state and calls into the driver's set_channel. Caller must hold
 * dev->mutex (see mt76_set_channel()).
 *
 * Returns the driver's set_channel result.
 */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	/* Signal in-progress reconfiguration to the rest of the driver */
	set_bit(MT76_RESET, &phy->state);

	/* Stop TX and give pending frames up to 200ms to drain */
	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* Changing frequency or width invalidates any DFS CAC result */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chandef = *chandef;

	/* Off-channel visits don't pollute the main channel's stats */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
1018
/* Locked wrapper around __mt76_set_channel(): stop the periodic MAC
 * work first, then perform the switch under dev->mutex.
 */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int ret;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	ret = __mt76_set_channel(phy, chandef, offchannel);
	mutex_unlock(&dev->mutex);

	return ret;
}
1033
mt76_update_channel(struct mt76_phy * phy)1034 int mt76_update_channel(struct mt76_phy *phy)
1035 {
1036 struct ieee80211_hw *hw = phy->hw;
1037 struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1038 bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1039
1040 phy->radar_enabled = hw->conf.radar_enabled;
1041
1042 return mt76_set_channel(phy, chandef, offchannel);
1043 }
1044 EXPORT_SYMBOL_GPL(mt76_update_channel);
1045
1046 static struct mt76_sband *
mt76_get_survey_sband(struct mt76_phy * phy,int * idx)1047 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1048 {
1049 if (*idx < phy->sband_2g.sband.n_channels)
1050 return &phy->sband_2g;
1051
1052 *idx -= phy->sband_2g.sband.n_channels;
1053 if (*idx < phy->sband_5g.sband.n_channels)
1054 return &phy->sband_5g;
1055
1056 *idx -= phy->sband_5g.sband.n_channels;
1057 if (*idx < phy->sband_6g.sband.n_channels)
1058 return &phy->sband_6g;
1059
1060 *idx -= phy->sband_6g.sband.n_channels;
1061 return NULL;
1062 }
1063
/* mac80211 get_survey callback: map the flat @idx across all phys
 * attached to @hw (multi-radio wiphys span several phys) and report
 * the selected channel's accumulated airtime statistics.
 *
 * Returns 0 on success or -ENOENT when @idx is past the last channel.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband = NULL;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int phy_idx = 0;
	int ret = 0;

	mutex_lock(&dev->mutex);

	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
		sband = NULL;
		phy = dev->phys[phy_idx];
		/* Skip empty slots and phys belonging to other hws */
		if (!phy || phy->hw != hw)
			continue;

		/* Rebases idx into this phy's bands on success */
		sband = mt76_get_survey_sband(phy, &idx);

		/* Refresh counters once, at this phy's first channel */
		if (idx == 0 && phy->dev->drv->update_survey)
			mt76_update_survey(phy);

		/* Single-radio wiphys only have one matching phy */
		if (sband || !hw->wiphy->n_radio)
			break;
	}

	if (!sband) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* Internal counters are in microseconds; survey wants ms */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_lock guards the software-accumulated BSS RX/TX counters */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1130
mt76_wcid_key_setup(struct mt76_dev * dev,struct mt76_wcid * wcid,struct ieee80211_key_conf * key)1131 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1132 struct ieee80211_key_conf *key)
1133 {
1134 struct ieee80211_key_seq seq;
1135 int i;
1136
1137 wcid->rx_check_pn = false;
1138
1139 if (!key)
1140 return;
1141
1142 if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1143 return;
1144
1145 wcid->rx_check_pn = true;
1146
1147 /* data frame */
1148 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1149 ieee80211_get_key_rx_seq(key, i, &seq);
1150 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1151 }
1152
1153 /* robust management frame */
1154 ieee80211_get_key_rx_seq(key, -1, &seq);
1155 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1156
1157 }
1158 EXPORT_SYMBOL(mt76_wcid_key_setup);
1159
/* Combine per-chain RSSI readings into a single signal value: the
 * strongest chain dominates and each additional chain within a few dB
 * contributes a small bonus. Returns -128 when no valid chain exists.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int combined = -128;
	int i;

	for (i = 0; chain_mask; chain_mask >>= 1, i++) {
		int cur = chain_signal[i];
		int delta;

		/* skip unused chains and invalid (positive) readings */
		if (!(chain_mask & BIT(0)) || cur > 0)
			continue;

		/* keep the stronger of the two values in 'combined' */
		if (cur > combined)
			swap(cur, combined);

		delta = combined - cur;
		if (delta == 0)
			combined += 3;
		else if (delta <= 2)
			combined += 2;
		else if (delta <= 6)
			combined += 1;
	}

	return combined;
}
EXPORT_SYMBOL(mt76_rx_signal);
1188
1189 static void
mt76_rx_convert(struct mt76_dev * dev,struct sk_buff * skb,struct ieee80211_hw ** hw,struct ieee80211_sta ** sta)1190 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1191 struct ieee80211_hw **hw,
1192 struct ieee80211_sta **sta)
1193 {
1194 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1195 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1196 struct mt76_rx_status mstat;
1197
1198 mstat = *((struct mt76_rx_status *)skb->cb);
1199 memset(status, 0, sizeof(*status));
1200
1201 status->flag = mstat.flag;
1202 status->freq = mstat.freq;
1203 status->enc_flags = mstat.enc_flags;
1204 status->encoding = mstat.encoding;
1205 status->bw = mstat.bw;
1206 if (status->encoding == RX_ENC_EHT) {
1207 status->eht.ru = mstat.eht.ru;
1208 status->eht.gi = mstat.eht.gi;
1209 } else {
1210 status->he_ru = mstat.he_ru;
1211 status->he_gi = mstat.he_gi;
1212 status->he_dcm = mstat.he_dcm;
1213 }
1214 status->rate_idx = mstat.rate_idx;
1215 status->nss = mstat.nss;
1216 status->band = mstat.band;
1217 status->signal = mstat.signal;
1218 status->chains = mstat.chains;
1219 status->ampdu_reference = mstat.ampdu_ref;
1220 status->device_timestamp = mstat.timestamp;
1221 status->mactime = mstat.timestamp;
1222 status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1223 if (status->signal <= -128)
1224 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1225
1226 if (ieee80211_is_beacon(hdr->frame_control) ||
1227 ieee80211_is_probe_resp(hdr->frame_control))
1228 status->boottime_ns = ktime_get_boottime_ns();
1229
1230 BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1231 BUILD_BUG_ON(sizeof(status->chain_signal) !=
1232 sizeof(mstat.chain_signal));
1233 memcpy(status->chain_signal, mstat.chain_signal,
1234 sizeof(mstat.chain_signal));
1235
1236 if (mstat.wcid) {
1237 status->link_valid = mstat.wcid->link_valid;
1238 status->link_id = mstat.wcid->link_id;
1239 }
1240
1241 *sta = wcid_to_sta(mstat.wcid);
1242 *hw = mt76_phy_hw(dev, mstat.phy_idx);
1243 }
1244
/* Software CCMP PN replay check for decrypted rx frames. Frames whose PN
 * did not increase relative to the stored counter are flagged
 * RX_FLAG_ONLY_MONITOR so mac80211 drops them for non-monitor interfaces.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* one replay counter per TID */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* byte-wise compare works as a numeric PN compare here because the
	 * PN is stored most-significant byte first (see mt76_insert_ccmp_hdr)
	 */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		/* PN did not increase: treat as replay, monitor-only */
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1302
1303 static void
mt76_airtime_report(struct mt76_dev * dev,struct mt76_rx_status * status,int len)1304 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1305 int len)
1306 {
1307 struct mt76_wcid *wcid = status->wcid;
1308 struct ieee80211_rx_status info = {
1309 .enc_flags = status->enc_flags,
1310 .rate_idx = status->rate_idx,
1311 .encoding = status->encoding,
1312 .band = status->band,
1313 .nss = status->nss,
1314 .bw = status->bw,
1315 };
1316 struct ieee80211_sta *sta;
1317 u32 airtime;
1318 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1319
1320 airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1321 spin_lock(&dev->cc_lock);
1322 dev->cur_cc_bss_rx += airtime;
1323 spin_unlock(&dev->cc_lock);
1324
1325 if (!wcid || !wcid->sta)
1326 return;
1327
1328 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1329 ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1330 }
1331
1332 static void
mt76_airtime_flush_ampdu(struct mt76_dev * dev)1333 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1334 {
1335 struct mt76_wcid *wcid;
1336 int wcid_idx;
1337
1338 if (!dev->rx_ampdu_len)
1339 return;
1340
1341 wcid_idx = dev->rx_ampdu_status.wcid_idx;
1342 if (wcid_idx < ARRAY_SIZE(dev->wcid))
1343 wcid = rcu_dereference(dev->wcid[wcid_idx]);
1344 else
1345 wcid = NULL;
1346 dev->rx_ampdu_status.wcid = wcid;
1347
1348 mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1349
1350 dev->rx_ampdu_len = 0;
1351 dev->rx_ampdu_ref = 0;
1352 }
1353
/* Software rx airtime accounting entry point. Frames belonging to one
 * A-MPDU are accumulated (length-wise) and reported as a single unit once
 * a frame from a different aggregate arrives; standalone frames are
 * reported immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		/* no header to inspect on 802.3-decapped frames */
		if (status->flag & RX_FLAG_8023)
			return;

		/* only account frames addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a new aggregate (or a non-aggregated frame) closes the previous
	 * A-MPDU accumulation
	 */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		/* first frame of a new aggregate: snapshot its rx status */
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			/* 0xff marks "no station" for the deferred report */
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1393
/* Per-frame station bookkeeping on the rx path: resolve the wcid for
 * PS-poll frames, feed airtime accounting, update RSSI/activity state and
 * track powersave transitions for stations with PS checking enabled.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-poll frames may arrive without a resolved wcid; look the
	 * station up by transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* rssi ewma tracks the (positive) magnitude of the signal */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames carry a meaningful PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no PS state change */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* set the flag before notifying the driver, clear it after: the
	 * driver callback observes the new state in both directions
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1461
/* Hand a batch of fully reordered rx frames to mac80211. Performs the
 * software PN replay check, converts the driver rx status and unchains
 * A-MSDU subframes stored on frag_list. With a napi context frames are
 * passed through GRO, otherwise delivered directly.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* PN check only on the head frame; subframes share its PN */
		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1501
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1502 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1503 struct napi_struct *napi)
1504 {
1505 struct sk_buff_head frames;
1506 struct sk_buff *skb;
1507
1508 __skb_queue_head_init(&frames);
1509
1510 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1511 mt76_check_sta(dev, skb);
1512 if (mtk_wed_device_active(&dev->mmio.wed))
1513 __skb_queue_tail(&frames, skb);
1514 else
1515 mt76_rx_aggr_reorder(skb, &frames);
1516 }
1517
1518 mt76_rx_complete(dev, &frames, napi);
1519 }
1520 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1521
/* Add a station: let the driver allocate its resources first, then wire
 * up the per-txq wcid index and publish the wcid for the rx path.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	/* driver assigns wcid->idx here; nothing below runs on failure */
	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	/* publish the wcid for RCU readers (rx path) */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1557
__mt76_sta_remove(struct mt76_phy * phy,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1558 void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
1559 struct ieee80211_sta *sta)
1560 {
1561 struct mt76_dev *dev = phy->dev;
1562 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1563 int i, idx = wcid->idx;
1564
1565 for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1566 mt76_rx_aggr_stop(dev, wcid, i);
1567
1568 if (dev->drv->sta_remove)
1569 dev->drv->sta_remove(dev, vif, sta);
1570
1571 mt76_wcid_cleanup(dev, wcid);
1572
1573 mt76_wcid_mask_clear(dev->wcid_mask, idx);
1574 phy->num_sta--;
1575 }
1576 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1577
/* Locked wrapper around __mt76_sta_remove() */
static void
mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	__mt76_sta_remove(phy, vif, sta);
	mutex_unlock(&dev->mutex);
}
1588
/* mac80211 .sta_state callback: handle station add/remove on the
 * NOTEXIST<->NONE transitions and forward the remaining association
 * state changes to the driver as mt76_sta_event notifications.
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	enum mt76_sta_event ev;

	/* resolve the phy actually serving this vif */
	phy = mt76_vif_phy(hw, vif);
	if (!phy)
		return -EINVAL;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(phy, vif, sta);

	if (!dev->drv->sta_event)
		return 0;

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		ev = MT76_STA_EVENT_ASSOC;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTHORIZED)
		ev = MT76_STA_EVENT_AUTHORIZE;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTH)
		ev = MT76_STA_EVENT_DISASSOC;
	else
		return 0;

	return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
1628
/* mac80211 .sta_pre_rcu_remove callback: unpublish the wcid so the rx
 * and tx-status paths stop finding it before the station is freed.
 * status_lock is taken to serialize against tx-status processing that
 * dereferences dev->wcid[].
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1643
/* Initialize the common fields of a wcid entry for the given band. */
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
	wcid->hw_key_idx = -1;
	wcid->phy_idx = band_idx;

	INIT_LIST_HEAD(&wcid->list);
	INIT_LIST_HEAD(&wcid->poll_list);
	INIT_LIST_HEAD(&wcid->tx_list);

	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);

	idr_init(&wcid->pktid);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1659
/* Release everything still queued for a wcid: reclaim tx-status skbs,
 * destroy the packet-id idr, detach the wcid from the phy's tx scheduling
 * list and free any pending tx frames.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* move all outstanding tx-status skbs for this wcid onto 'list' */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* tx_pending.lock nests inside phy->tx_lock */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	/* free the collected skbs outside all locks */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1690
/* Queue a wcid for stats polling unless it is already queued. Skipped
 * entirely while the MCU is resetting, since polling would be futile.
 */
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1702
/* Convert a dBm txpower request into the internal power unit (scaled by
 * 2, i.e. 0.5 dB steps — cf. the divide-by-2 in mt76_get_txpower), apply
 * SAR limits and subtract the multi-chain path delta.
 */
s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
{
	s8 bound;

	bound = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
	bound -= mt76_tx_power_path_delta(hweight16(phy->chainmask));

	return bound;
}
EXPORT_SYMBOL_GPL(mt76_get_power_bound);
1713
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,unsigned int link_id,int * dbm)1714 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1715 unsigned int link_id, int *dbm)
1716 {
1717 struct mt76_phy *phy = mt76_vif_phy(hw, vif);
1718 int n_chains, delta;
1719
1720 if (!phy)
1721 return -EINVAL;
1722
1723 n_chains = hweight16(phy->chainmask);
1724 delta = mt76_tx_power_path_delta(n_chains);
1725 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1726
1727 return 0;
1728 }
1729 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1730
/* Store user-supplied SAR power limits (one per supported frequency
 * range) in phy->frp for later lookup by mt76_get_sar_power().
 * NOTE(review): freq_range_index is used to index capa->freq_ranges and
 * phy->frp without a local bounds check — assumed already validated by
 * cfg80211 against sar_capa; confirm before relying on it elsewhere.
 */
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies power limitation in 0.25 dBm units;
		 * shift down to the driver's 0.5 dB unit
		 */
		s32 power = sar->sub_specs[i].power >> 1;

		/* clamp out-of-range requests to the maximum */
		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1756
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1757 int mt76_get_sar_power(struct mt76_phy *phy,
1758 struct ieee80211_channel *chan,
1759 int power)
1760 {
1761 const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1762 int freq, i;
1763
1764 if (!capa || !phy->frp)
1765 return power;
1766
1767 if (power > 127 || power < -127)
1768 power = 127;
1769
1770 freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1771 for (i = 0 ; i < capa->num_freq_ranges; i++) {
1772 if (phy->frp[i].range &&
1773 freq >= phy->frp[i].range->start_freq &&
1774 freq < phy->frp[i].range->end_freq) {
1775 power = min_t(int, phy->frp[i].power, power);
1776 break;
1777 }
1778 }
1779
1780 return power;
1781 }
1782 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1783
1784 static void
__mt76_csa_finish(void * priv,u8 * mac,struct ieee80211_vif * vif)1785 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1786 {
1787 if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1788 ieee80211_csa_finish(vif, 0);
1789 }
1790
mt76_csa_finish(struct mt76_dev * dev)1791 void mt76_csa_finish(struct mt76_dev *dev)
1792 {
1793 if (!dev->csa_complete)
1794 return;
1795
1796 ieee80211_iterate_active_interfaces_atomic(dev->hw,
1797 IEEE80211_IFACE_ITER_RESUME_ALL,
1798 __mt76_csa_finish, dev);
1799
1800 dev->csa_complete = 0;
1801 }
1802 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1803
1804 static void
__mt76_csa_check(void * priv,u8 * mac,struct ieee80211_vif * vif)1805 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1806 {
1807 struct mt76_dev *dev = priv;
1808
1809 if (!vif->bss_conf.csa_active)
1810 return;
1811
1812 dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1813 }
1814
/* Poll all active interfaces for a completed CSA beacon countdown;
 * the result is latched in dev->csa_complete for mt76_csa_finish().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
						   IEEE80211_IFACE_ITER_RESUME_ALL,
						   __mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1822
/* mac80211 .set_tim callback: intentionally a no-op — presumably the TIM
 * element is maintained by the hardware/firmware beaconing path (TODO
 * confirm per chip); returning 0 signals success to mac80211.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1829
/* Re-insert an 8-byte CCMP header (stripped by the hardware) between the
 * 802.11 header and the payload, rebuilt from the PN saved in
 * status->iv. status->iv holds the PN most-significant byte first.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* make room and shift the 802.11 header to the front */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header: PN0 PN1 rsvd (ExtIV|KeyID) PN2 PN3 PN4 PN5 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);	/* 0x20 = ExtIV bit */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	/* the IV is present again, so clear the stripped flag */
	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1852
/* Translate a hardware legacy rate index into the corresponding offset
 * in @sband->bitrates. Returns 0 (first rate) when no entry matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int start = 0, i;

	if (cck) {
		/* CCK rates only exist on 2.4 GHz */
		if (sband->band != NL80211_BAND_2GHZ)
			return 0;

		/* mask out the short-preamble flag in the hw index */
		idx &= ~BIT(2);
	} else if (sband->band == NL80211_BAND_2GHZ) {
		/* skip the leading CCK entries of the 2.4 GHz rate table */
		start = 4;
	}

	for (i = start; i < sband->n_bitrates; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1877
/* mac80211 .sw_scan_start callback: mark the phy as scanning so other
 * paths (e.g. DFS state) can adapt.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1886
/* mac80211 .sw_scan_complete callback: clear the scanning flag set by
 * mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1894
/* mac80211 .get_antenna callback: report the union of the chainmasks of
 * all phys driving this hw; rx and tx masks are identical here.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
		     u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	u32 mask = 0;
	int i;

	mutex_lock(&dev->mutex);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		if (dev->phys[i] && dev->phys[i]->hw == hw)
			mask |= dev->phys[i]->chainmask;
	}
	mutex_unlock(&dev->mutex);

	*tx_ant = mask;
	*rx_ant = mask;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1913
1914 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base,void * wed,u32 flags)1915 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1916 int ring_base, void *wed, u32 flags)
1917 {
1918 struct mt76_queue *hwq;
1919 int err;
1920
1921 hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1922 if (!hwq)
1923 return ERR_PTR(-ENOMEM);
1924
1925 hwq->flags = flags;
1926 hwq->wed = wed;
1927
1928 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1929 if (err < 0)
1930 return ERR_PTR(err);
1931
1932 return hwq;
1933 }
1934 EXPORT_SYMBOL_GPL(mt76_init_queue);
1935
/* Accumulate one station's tx statistics into the ethtool data array.
 * Values are added (not assigned) so repeated invocations aggregate over
 * all stations; the slot order here must match the driver's stat names.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	/* per-PHY-mode tx counters; EHT slots only when supported */
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* non-EHT devices omit the last bandwidth bucket */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 14 MCS indices with EHT, 12 without */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	/* record how many slots this worker consumed */
	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1971
/* Append aggregated page-pool statistics for all rx queues to the
 * ethtool data array and advance *index accordingly. Compiles to a no-op
 * when CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	/* page_pool_get_stats() accumulates into 'stats' */
	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1986
/* Derive the DFS state for a phy from the regulatory region, scan state,
 * radar-detection flag and the current channel definition.
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	/* no DFS without a regulatory region, and not while scanning */
	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!phy->radar_enabled) {
		/* monitor mode on a radar channel still needs detection */
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	/* beaconing not yet permitted -> channel availability check phase */
	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
2010
/* Tear down per-vif state on interface removal: unpublish the primary
 * link and abort any scan or remain-on-channel operation owned by it.
 */
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	rcu_assign_pointer(mvif->link[0], NULL);
	mt76_abort_scan(dev);
	if (mvif->roc_phy)
		mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2022