1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4 */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8
/* Initializer helpers for ieee80211_channel table entries.
 * .max_power is a driver-wide 30 dBm cap; regulatory rules may lower it.
 */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN6G(_idx, _freq) { \
	.band = NL80211_BAND_6GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
29
/* 2.4 GHz channels 1-14 (hw_value == channel number) */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46
/* 5 GHz channels 36-177, grouped by sub-band (UNII-1/2A/2C-3/3-4) */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80
/* 6 GHz channels 1-233 (20 MHz spacing), grouped by UNII sub-band */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146
/* Throughput-based LED blink table for the mac80211 tpt LED trigger:
 * higher throughput (in kbit/s, hence the * 1024) -> faster blinking.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
159
/* Shared legacy rate table: 4 CCK entries followed by 8 OFDM entries.
 * Second macro argument looks like the bitrate in 100 kbit/s units
 * (mac80211 convention); CCK_RATE/OFDM_RATE are defined in mt76.h.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175
/* SAR power-limit frequency ranges; together they cover every channel
 * in the 2.4/5/6 GHz tables above so cfg80211 can match any chandef.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189
/* SAR capability advertised via wiphy->sar_capa in mt76_phy_init() */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195
mt76_led_init(struct mt76_phy * phy)196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 struct mt76_dev *dev = phy->dev;
199 struct ieee80211_hw *hw = phy->hw;
200 struct device_node *np = dev->dev->of_node;
201
202 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 return 0;
204
205 np = of_get_child_by_name(np, "led");
206 if (np) {
207 if (!of_device_is_available(np)) {
208 of_node_put(np);
209 dev_info(dev->dev,
210 "led registration was explicitly disabled by dts\n");
211 return 0;
212 }
213
214 if (phy == &dev->phy) {
215 int led_pin;
216
217 if (!of_property_read_u32(np, "led-sources", &led_pin))
218 phy->leds.pin = led_pin;
219
220 phy->leds.al =
221 of_property_read_bool(np, "led-active-low");
222 }
223
224 of_node_put(np);
225 }
226
227 snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 wiphy_name(hw->wiphy));
229
230 phy->leds.cdev.name = phy->leds.name;
231 phy->leds.cdev.default_trigger =
232 ieee80211_create_tpt_led_trigger(hw,
233 IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 mt76_tpt_blink,
235 ARRAY_SIZE(mt76_tpt_blink));
236
237 dev_info(dev->dev,
238 "registering led '%s'\n", phy->leds.name);
239
240 return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242
mt76_led_cleanup(struct mt76_phy * phy)243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 return;
247
248 led_classdev_unregister(&phy->leds.cdev);
249 }
250
/* Fill the spatial-stream dependent parts of the HT (and optionally
 * VHT) capabilities of @sband from the phy's antenna mask.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int nss = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;
	int i;

	/* TX STBC needs at least two spatial streams */
	if (nss > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* One full rx MCS mask byte per supported stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nss ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nss > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* VHT MCS map: two bits per stream, MCS 0-9 on supported streams */
	for (i = 0; i < 8; i++) {
		u16 mcs = i < nss ? IEEE80211_VHT_MCS_SUPPORT_0_9
				  : IEEE80211_VHT_MCS_NOT_SUPPORTED;

		mcs_map |= mcs << (i * 2);
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
			cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292
/* Refresh the stream-dependent HT/VHT caps on every band the phy
 * supports; the 2.4 GHz band never advertises VHT.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303
/* Initialize one supported band: duplicate the const channel template
 * into writable per-phy memory, allocate per-channel state, hook up the
 * rate table and fill HT/VHT capabilities as requested.
 *
 * Both allocations are devm-managed, so no explicit cleanup is needed.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* Writable copy of the shared const channel table */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	/* One mt76_channel_state (survey counters etc.) per channel */
	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Fill the stream-count dependent HT/VHT MCS masks */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360
361 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 int n_rates)
364 {
365 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366
367 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 ARRAY_SIZE(mt76_channels_2ghz), rates,
369 n_rates, true, false);
370 }
371
372 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 int n_rates, bool vht)
375 {
376 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377
378 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 ARRAY_SIZE(mt76_channels_5ghz), rates,
380 n_rates, true, vht);
381 }
382
383 static int
mt76_init_sband_6g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 int n_rates)
386 {
387 phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388
389 return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 ARRAY_SIZE(mt76_channels_6ghz), rates,
391 n_rates, false, false);
392 }
393
394 static void
mt76_check_sband(struct mt76_phy * phy,struct mt76_sband * msband,enum nl80211_band band)395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 enum nl80211_band band)
397 {
398 struct ieee80211_supported_band *sband = &msband->sband;
399 bool found = false;
400 int i;
401
402 if (!sband)
403 return;
404
405 for (i = 0; i < sband->n_channels; i++) {
406 if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 continue;
408
409 found = true;
410 break;
411 }
412
413 if (found) {
414 phy->chandef.chan = &sband->channels[0];
415 phy->chan_state = &msband->chan[0];
416 return;
417 }
418
419 sband->n_channels = 0;
420 phy->hw->wiphy->bands[band] = NULL;
421 }
422
/* Common wiphy/ieee80211_hw setup shared by the primary and any
 * secondary phy: feature flags, antenna info, SAR capability and the
 * per-range SAR power array (devm-managed).
 *
 * Returns 0 on success, -ENOMEM if the SAR power array allocation fails.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* One mt76_freq_range_power entry per advertised SAR range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* Software A-MSDU tx only when the hw does not offload it and
	 * fragment lists are available
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
482
/* Allocate an extra (secondary-band) phy together with its mac80211 hw.
 * The hw priv area holds the mt76_phy followed (8-byte aligned) by
 * @size bytes of driver-private data reachable via phy->priv.
 *
 * Returns the new phy, or NULL on allocation failure.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	/* Driver-private data lives right after the aligned phy struct */
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
516
/* Finish setup of a secondary phy and register it with mac80211:
 * common wiphy init, per-band sband setup, optional LED, DT frequency
 * limits, then ieee80211_register_hw().
 *
 * @rates must start with the 4 CCK entries (as mt76_rates does); the
 * 5/6 GHz bands skip them via rates + 4 / n_rates - 4.
 * Returns 0 on success or a negative errno.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* Skip the CCK rates; 5 GHz is OFDM only */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop bands left without
	 * any usable channel
	 */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
565
mt76_unregister_phy(struct mt76_phy * phy)566 void mt76_unregister_phy(struct mt76_phy *phy)
567 {
568 struct mt76_dev *dev = phy->dev;
569
570 if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
571 return;
572
573 if (IS_ENABLED(CONFIG_MT76_LEDS))
574 mt76_led_cleanup(phy);
575 mt76_tx_status_check(dev, true);
576 ieee80211_unregister_hw(phy->hw);
577 dev->phys[phy->band_idx] = NULL;
578 }
579 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
580
/* Create a page pool for @q. Pools are only allocated for rx queues
 * and the WED tx-free queue; other queues return 0 without a pool.
 * MMIO devices let the page pool handle DMA mapping/sync and attach
 * the matching NAPI instance for rx queues.
 *
 * Returns 0 on success (or when no pool is needed), negative errno on
 * page_pool_create() failure.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* rx queue index, or -1 for the wed_tx_free queue */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	/* Data rx queues get a larger pool than auxiliary queues */
	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
629
/* Allocate the mt76_dev (embedded in the primary phy's ieee80211_hw
 * priv area) and initialize all common state: locks, queues, token
 * IDRs, tx worker hook and the ordered workqueue.
 *
 * Returns the new device, or NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* DMA device defaults to the bus device; drivers may override */
	dev->dma_dev = pdev;

	/* The primary phy is embedded in the device, band index 0 */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
706
/* Register the primary phy with mac80211: wiphy init, per-band sbands,
 * DT frequency limits, optional LED, ieee80211_register_hw() and
 * finally the tx worker thread.
 *
 * @rates must start with the 4 CCK entries (as mt76_rates does); the
 * 5/6 GHz bands skip them via rates + 4 / n_rates - 4.
 * Returns 0 on success or a negative errno.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* Skip the CCK rates; 5 GHz is OFDM only */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop bands left without
	 * any usable channel
	 */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
760
mt76_unregister_device(struct mt76_dev * dev)761 void mt76_unregister_device(struct mt76_dev *dev)
762 {
763 struct ieee80211_hw *hw = dev->hw;
764
765 if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
766 return;
767
768 if (IS_ENABLED(CONFIG_MT76_LEDS))
769 mt76_led_cleanup(&dev->phy);
770 mt76_tx_status_check(dev, true);
771 mt76_wcid_cleanup(dev, &dev->global_wcid);
772 ieee80211_unregister_hw(hw);
773 }
774 EXPORT_SYMBOL_GPL(mt76_unregister_device);
775
mt76_free_device(struct mt76_dev * dev)776 void mt76_free_device(struct mt76_dev *dev)
777 {
778 mt76_worker_teardown(&dev->tx_worker);
779 if (dev->wq) {
780 destroy_workqueue(dev->wq);
781 dev->wq = NULL;
782 }
783 ieee80211_free_hw(dev->hw);
784 }
785 EXPORT_SYMBOL_GPL(mt76_free_device);
786
/* Finish the A-MSDU currently accumulated for rx queue @q: validate its
 * first subframe and either drop it or move it to the device rx queue
 * for further processing.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	/* Detach the accumulated burst from the phy before validation */
	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* Skip the IV when present but not yet stripped */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be -> spoofed A-MSDU, drop */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
822
/* Accumulate rx frames belonging to one A-MSDU into a frag_list on the
 * head skb, releasing the previous A-MSDU when a new one starts and the
 * current one when its last subframe (or a non-A-MSDU frame) arrives.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* A pending A-MSDU ends when a non-A-MSDU frame, a new first
	 * subframe or a different sequence number shows up
	 */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new burst; tail points at the head's frag_list */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* Append subframe to the chain */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
845
/* Driver rx entry point: hand a received frame (with mt76_rx_status in
 * skb->cb) to the A-MSDU burst accumulator for queue @q. Frames arriving
 * while the phy is not running are dropped.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* Testmode rx statistics (packet and FCS error counters) */
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
867
mt76_has_tx_pending(struct mt76_phy * phy)868 bool mt76_has_tx_pending(struct mt76_phy *phy)
869 {
870 struct mt76_queue *q;
871 int i;
872
873 for (i = 0; i < __MT_TXQ_MAX; i++) {
874 q = phy->q_tx[i];
875 if (q && q->queued)
876 return true;
877 }
878
879 return false;
880 }
881 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
882
883 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)884 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
885 {
886 struct mt76_sband *msband;
887 int idx;
888
889 if (c->band == NL80211_BAND_2GHZ)
890 msband = &phy->sband_2g;
891 else if (c->band == NL80211_BAND_6GHZ)
892 msband = &phy->sband_6g;
893 else
894 msband = &phy->sband_5g;
895
896 idx = c - &msband->sband.channels[0];
897 return &msband->chan[idx];
898 }
899
/* Account the time since the last survey update as channel-active time
 * on the current channel and restart the measurement interval.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;
	ktime_t delta = ktime_sub(time, phy->survey_time);

	state->cc_active += ktime_to_us(delta);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
909
/* Refresh the survey counters for the current channel: let the driver
 * update its hardware counters, account active time, and fold in the
 * software BSS rx airtime for drivers that track it in software.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock protects the cur_cc_bss_rx accumulator */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
931
/* Switch @phy to @chandef.
 *
 * Runs under dev->mutex with MT76_RESET set: stops the tx worker,
 * drains pending tx (bounded wait), updates survey/DFS bookkeeping and
 * then invokes the driver's set_channel hook.
 *
 * Returns the driver hook's return value.
 */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	set_bit(MT76_RESET, &phy->state);

	/* Stop new tx and give pending frames up to 200 ms to drain */
	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* A frequency or width change invalidates the known DFS state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* Off-channel visits must not pollute main-channel survey data */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
	mt76_worker_enable(&dev->tx_worker);

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_schedule(&dev->tx_worker);

	mutex_unlock(&dev->mutex);

	return ret;
}
972
mt76_update_channel(struct mt76_phy * phy)973 int mt76_update_channel(struct mt76_phy *phy)
974 {
975 struct ieee80211_hw *hw = phy->hw;
976 struct cfg80211_chan_def *chandef = &hw->conf.chandef;
977 bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
978
979 return mt76_set_channel(phy, chandef, offchannel);
980 }
981 EXPORT_SYMBOL_GPL(mt76_update_channel);
982
/* mac80211 get_survey callback. @idx enumerates channels across all
 * bands in the order 2.4 GHz, 5 GHz, 6 GHz; returns -ENOENT past the
 * last channel. Counters are stored in microseconds internally and
 * reported in milliseconds.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* Refresh hardware counters once per enumeration pass */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* Map the flat index onto (band, channel-within-band) */
	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* Convert from us to ms for cfg80211 */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_lock protects counters also updated from the rx path */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1047
/* Snapshot the current CCMP packet numbers of @key into @wcid so the rx
 * path can perform software PN replay checking. Only CCMP is covered;
 * any other cipher leaves rx_check_pn disabled.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	/* Intentional post-loop use of i (== IEEE80211_NUM_TIDS): the
	 * management-frame PN goes in the slot after the per-TID entries
	 * (rx_key_pn is sized accordingly in mt76.h — see its declaration).
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1076
/* Combine per-chain RSSI readings into a single signal value: keep the
 * strongest chain and add a small bonus (up to +3) for each additional
 * chain depending on how close it is to the running maximum. Returns
 * -128 when no chain provided a usable (non-positive) reading.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int combined = -128;
	u8 mask;

	for (mask = chain_mask; mask; mask >>= 1, chain_signal++) {
		int val, delta;

		val = *chain_signal;
		/* Skip chains not in the mask and bogus positive readings */
		if (!(mask & BIT(0)) ||
		    val > 0)
			continue;

		/* Keep the larger value in 'combined' */
		if (val > combined)
			swap(val, combined);

		delta = combined - val;
		if (delta == 0)
			combined += 3;
		else if (delta <= 2)
			combined += 2;
		else if (delta <= 6)
			combined += 1;
	}

	return combined;
}
EXPORT_SYMBOL(mt76_rx_signal);
1105
1106 static void
mt76_rx_convert(struct mt76_dev * dev,struct sk_buff * skb,struct ieee80211_hw ** hw,struct ieee80211_sta ** sta)1107 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1108 struct ieee80211_hw **hw,
1109 struct ieee80211_sta **sta)
1110 {
1111 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1112 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1113 struct mt76_rx_status mstat;
1114
1115 mstat = *((struct mt76_rx_status *)skb->cb);
1116 memset(status, 0, sizeof(*status));
1117
1118 status->flag = mstat.flag;
1119 status->freq = mstat.freq;
1120 status->enc_flags = mstat.enc_flags;
1121 status->encoding = mstat.encoding;
1122 status->bw = mstat.bw;
1123 if (status->encoding == RX_ENC_EHT) {
1124 status->eht.ru = mstat.eht.ru;
1125 status->eht.gi = mstat.eht.gi;
1126 } else {
1127 status->he_ru = mstat.he_ru;
1128 status->he_gi = mstat.he_gi;
1129 status->he_dcm = mstat.he_dcm;
1130 }
1131 status->rate_idx = mstat.rate_idx;
1132 status->nss = mstat.nss;
1133 status->band = mstat.band;
1134 status->signal = mstat.signal;
1135 status->chains = mstat.chains;
1136 status->ampdu_reference = mstat.ampdu_ref;
1137 status->device_timestamp = mstat.timestamp;
1138 status->mactime = mstat.timestamp;
1139 status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1140 if (status->signal <= -128)
1141 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1142
1143 if (ieee80211_is_beacon(hdr->frame_control) ||
1144 ieee80211_is_probe_resp(hdr->frame_control))
1145 status->boottime_ns = ktime_get_boottime_ns();
1146
1147 BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1148 BUILD_BUG_ON(sizeof(status->chain_signal) !=
1149 sizeof(mstat.chain_signal));
1150 memcpy(status->chain_signal, mstat.chain_signal,
1151 sizeof(mstat.chain_signal));
1152
1153 if (mstat.wcid) {
1154 status->link_valid = mstat.wcid->link_valid;
1155 status->link_id = mstat.wcid->link_id;
1156 }
1157
1158 *sta = wcid_to_sta(mstat.wcid);
1159 *hw = mt76_phy_hw(dev, mstat.phy_idx);
1160 }
1161
/* Software CCMP PN replay check: frames whose PN does not advance past
 * the last accepted value are demoted to monitor-only delivery.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	/* only stations with rx_check_pn set (CCMP keys) are tracked */
	if (!wcid || !wcid->rx_check_pn)
		return;

	/* data frames use one replay counter per TID */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* memcmp is a valid PN comparison here, assuming the iv bytes are
	 * stored most-significant first (matches mt76_insert_ccmp_hdr)
	 */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		/* replayed or stale PN: hand to monitor interfaces only */
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1219
1220 static void
mt76_airtime_report(struct mt76_dev * dev,struct mt76_rx_status * status,int len)1221 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1222 int len)
1223 {
1224 struct mt76_wcid *wcid = status->wcid;
1225 struct ieee80211_rx_status info = {
1226 .enc_flags = status->enc_flags,
1227 .rate_idx = status->rate_idx,
1228 .encoding = status->encoding,
1229 .band = status->band,
1230 .nss = status->nss,
1231 .bw = status->bw,
1232 };
1233 struct ieee80211_sta *sta;
1234 u32 airtime;
1235 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1236
1237 airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1238 spin_lock(&dev->cc_lock);
1239 dev->cur_cc_bss_rx += airtime;
1240 spin_unlock(&dev->cc_lock);
1241
1242 if (!wcid || !wcid->sta)
1243 return;
1244
1245 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1246 ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1247 }
1248
1249 static void
mt76_airtime_flush_ampdu(struct mt76_dev * dev)1250 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1251 {
1252 struct mt76_wcid *wcid;
1253 int wcid_idx;
1254
1255 if (!dev->rx_ampdu_len)
1256 return;
1257
1258 wcid_idx = dev->rx_ampdu_status.wcid_idx;
1259 if (wcid_idx < ARRAY_SIZE(dev->wcid))
1260 wcid = rcu_dereference(dev->wcid[wcid_idx]);
1261 else
1262 wcid = NULL;
1263 dev->rx_ampdu_status.wcid = wcid;
1264
1265 mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1266
1267 dev->rx_ampdu_len = 0;
1268 dev->rx_ampdu_ref = 0;
1269 }
1270
/* Per-frame entry point for software rx airtime accounting. A-MPDU
 * subframes are accumulated and reported in one batch; everything else is
 * reported immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	/* only drivers that opt in via MT_DRV_SW_RX_AIRTIME use this path */
	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		/* 802.3-decapped frames carry no 802.11 header to inspect */
		if (status->flag & RX_FLAG_8023)
			return;

		/* frames without a known station only count when they are
		 * addressed to us
		 */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* flush the running A-MPDU stats when a non-aggregated frame
	 * arrives or a new aggregate (different reference) begins
	 */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* first subframe of a new A-MPDU: snapshot its
			 * status as representative of the whole aggregate
			 */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		/* length accumulates until the A-MPDU is flushed */
		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1310
/* Per-frame station bookkeeping on the rx path: wcid resolution for
 * PS-poll, rssi averaging, airtime accounting and power-save state
 * tracking.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-poll frames can arrive without a resolved wcid; look the
	 * station up by its transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* the ewma stores the magnitude of the (non-positive) signal */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only evaluate the PM bit on complete management/data frames */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no change in power-save state */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* deliberate ordering: set the flag before the driver callback on
	 * PS entry, clear it after on PS exit, so MT_WCID_FLAG_PS reflects
	 * "in power save" while sta_ps runs
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1378
/* Deliver a batch of reordered rx frames to mac80211. A-MSDU subframes
 * chained via frag_list are detached and delivered individually. With a
 * napi context the frames go through GRO, otherwise straight to the
 * network stack.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		/* save and detach the subframe chain before the head frame
		 * is handed to mac80211
		 */
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1418
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1419 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1420 struct napi_struct *napi)
1421 {
1422 struct sk_buff_head frames;
1423 struct sk_buff *skb;
1424
1425 __skb_queue_head_init(&frames);
1426
1427 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1428 mt76_check_sta(dev, skb);
1429 if (mtk_wed_device_active(&dev->mmio.wed))
1430 __skb_queue_tail(&frames, skb);
1431 else
1432 mt76_rx_aggr_reorder(skb, &frames);
1433 }
1434
1435 mt76_rx_complete(dev, &frames, napi);
1436 }
1437 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1438
/* Allocate driver state for a new station, bind its tx queues to the
 * assigned wcid and publish the wcid for lookups.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	/* driver allocates hardware resources (wcid->idx among them) */
	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* bind each of the station's tx queues to its wcid index */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	/* stations on the second band are tracked in wcid_phy_mask */
	if (phy->band_idx == MT_BAND1)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->phy_idx = phy->band_idx;
	/* publish the wcid for rx-path lookups */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_wcid_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1476
/* Tear down a station: stop rx aggregation for every TID, let the driver
 * release its hardware state, flush pending tx frames, then return the
 * wcid index to the allocator. Caller must hold dev->mutex.
 */
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	/* stop rx reordering before the wcid goes away */
	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	/* drop queued tx status and pending tx frames owned by this wcid */
	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1495
/* Locked wrapper around __mt76_sta_remove() */
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}
1504
mt76_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)1505 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1506 struct ieee80211_sta *sta,
1507 enum ieee80211_sta_state old_state,
1508 enum ieee80211_sta_state new_state)
1509 {
1510 struct mt76_phy *phy = hw->priv;
1511 struct mt76_dev *dev = phy->dev;
1512 enum mt76_sta_event ev;
1513
1514 if (old_state == IEEE80211_STA_NOTEXIST &&
1515 new_state == IEEE80211_STA_NONE)
1516 return mt76_sta_add(phy, vif, sta);
1517
1518 if (old_state == IEEE80211_STA_NONE &&
1519 new_state == IEEE80211_STA_NOTEXIST)
1520 mt76_sta_remove(dev, vif, sta);
1521
1522 if (!dev->drv->sta_event)
1523 return 0;
1524
1525 if (old_state == IEEE80211_STA_AUTH &&
1526 new_state == IEEE80211_STA_ASSOC)
1527 ev = MT76_STA_EVENT_ASSOC;
1528 else if (old_state == IEEE80211_STA_ASSOC &&
1529 new_state == IEEE80211_STA_AUTHORIZED)
1530 ev = MT76_STA_EVENT_AUTHORIZE;
1531 else if (old_state == IEEE80211_STA_ASSOC &&
1532 new_state == IEEE80211_STA_AUTH)
1533 ev = MT76_STA_EVENT_DISASSOC;
1534 else
1535 return 0;
1536
1537 return dev->drv->sta_event(dev, vif, sta, ev);
1538 }
1539 EXPORT_SYMBOL_GPL(mt76_sta_state);
1540
/* mac80211 sta_pre_rcu_remove callback: unpublish the wcid pointer before
 * mac80211 frees the station. Holding status_lock across the update keeps
 * the tx status path from picking the wcid up mid-removal.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1555
mt76_wcid_init(struct mt76_wcid * wcid)1556 void mt76_wcid_init(struct mt76_wcid *wcid)
1557 {
1558 INIT_LIST_HEAD(&wcid->tx_list);
1559 skb_queue_head_init(&wcid->tx_pending);
1560 skb_queue_head_init(&wcid->tx_offchannel);
1561
1562 INIT_LIST_HEAD(&wcid->list);
1563 idr_init(&wcid->pktid);
1564 }
1565 EXPORT_SYMBOL_GPL(mt76_wcid_init);
1566
/* Release everything a wcid still owns on the tx side: outstanding tx
 * status skbs, the packet-id idr and any not-yet-transmitted frames.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* reap all tx status skbs still tracked for this wcid (-1 = all
	 * packet ids); they land on the local list
	 */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	/* unlink from the phy's tx scheduling list, if queued */
	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* splice pending tx frames onto the local list under the queue
	 * lock, nested inside tx_lock
	 */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	/* free everything collected above outside of the locks */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1597
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int * dbm)1598 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1599 int *dbm)
1600 {
1601 struct mt76_phy *phy = hw->priv;
1602 int n_chains = hweight16(phy->chainmask);
1603 int delta = mt76_tx_power_nss_delta(n_chains);
1604
1605 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1606
1607 return 0;
1608 }
1609 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1610
mt76_init_sar_power(struct ieee80211_hw * hw,const struct cfg80211_sar_specs * sar)1611 int mt76_init_sar_power(struct ieee80211_hw *hw,
1612 const struct cfg80211_sar_specs *sar)
1613 {
1614 struct mt76_phy *phy = hw->priv;
1615 const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1616 int i;
1617
1618 if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1619 return -EINVAL;
1620
1621 for (i = 0; i < sar->num_sub_specs; i++) {
1622 u32 index = sar->sub_specs[i].freq_range_index;
1623 /* SAR specifies power limitaton in 0.25dbm */
1624 s32 power = sar->sub_specs[i].power >> 1;
1625
1626 if (power > 127 || power < -127)
1627 power = 127;
1628
1629 phy->frp[index].range = &capa->freq_ranges[index];
1630 phy->frp[index].power = power;
1631 }
1632
1633 return 0;
1634 }
1635 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1636
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1637 int mt76_get_sar_power(struct mt76_phy *phy,
1638 struct ieee80211_channel *chan,
1639 int power)
1640 {
1641 const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1642 int freq, i;
1643
1644 if (!capa || !phy->frp)
1645 return power;
1646
1647 if (power > 127 || power < -127)
1648 power = 127;
1649
1650 freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1651 for (i = 0 ; i < capa->num_freq_ranges; i++) {
1652 if (phy->frp[i].range &&
1653 freq >= phy->frp[i].range->start_freq &&
1654 freq < phy->frp[i].range->end_freq) {
1655 power = min_t(int, phy->frp[i].power, power);
1656 break;
1657 }
1658 }
1659
1660 return power;
1661 }
1662 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1663
1664 static void
__mt76_csa_finish(void * priv,u8 * mac,struct ieee80211_vif * vif)1665 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1666 {
1667 if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1668 ieee80211_csa_finish(vif, 0);
1669 }
1670
/* Finalize a pending channel switch on all interfaces, then clear the
 * completion flag set by mt76_csa_check().
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	/* only act once a completed countdown has been flagged */
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1683
1684 static void
__mt76_csa_check(void * priv,u8 * mac,struct ieee80211_vif * vif)1685 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1686 {
1687 struct mt76_dev *dev = priv;
1688
1689 if (!vif->bss_conf.csa_active)
1690 return;
1691
1692 dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1693 }
1694
/* Poll the beacon countdown on all active interfaces; results accumulate
 * in dev->csa_complete and are consumed by mt76_csa_finish().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1702
/* mac80211 set_tim callback: intentionally a no-op. Returning 0 signals
 * success; presumably the TIM element is regenerated on the beacon path
 * instead — NOTE(review): confirm against the per-driver beacon code.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1709
/* Rebuild the 8-byte CCMP header that the hardware stripped on rx, using
 * the PN saved in status->iv, so mac80211 sees a complete frame.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* make room after the 802.11 header by moving it down 8 bytes */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0 PN1 rsvd (keyid|ExtIV) PN2..PN5; the
	 * saved pn[] bytes are reversed into on-air order here
	 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);	/* ExtIV bit + key id */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	/* the IV is present in the frame again */
	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1732
/**
 * mt76_get_rate - map a hardware rate value to a bitrate table index
 * @dev: mt76 device
 * @sband: band whose bitrate table is searched
 * @idx: hardware rate value to look up
 * @cck: true when the rate is a CCK rate
 *
 * Returns the index into sband->bitrates whose low hw_value byte matches
 * @idx, or 0 when no entry matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int start = 0, i;

	if (cck) {
		/* CCK rates only exist on 2.4 GHz */
		if (sband->band != NL80211_BAND_2GHZ)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband->band == NL80211_BAND_2GHZ) {
		/* OFDM entries follow the four CCK entries at the start
		 * of the 2 GHz bitrate table
		 */
		start = 4;
	}

	for (i = start; i < sband->n_bitrates; i++)
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1757
/* Default mac80211 sw_scan_start callback: mark the phy as scanning. */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1766
/* Default mac80211 sw_scan_complete callback: clear the scanning flag. */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1774
/* mac80211 get_antenna callback: tx and rx share one antenna mask. */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = *rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1788
1789 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base,void * wed,u32 flags)1790 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1791 int ring_base, void *wed, u32 flags)
1792 {
1793 struct mt76_queue *hwq;
1794 int err;
1795
1796 hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1797 if (!hwq)
1798 return ERR_PTR(-ENOMEM);
1799
1800 hwq->flags = flags;
1801 hwq->wed = wed;
1802
1803 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1804 if (err < 0)
1805 return ERR_PTR(err);
1806
1807 return hwq;
1808 }
1809 EXPORT_SYMBOL_GPL(mt76_init_queue);
1810
/* Pick the hw_value of the default (basic) tx rate for a vif, offsetting
 * past the CCK entries of the rate table on non-2GHz bands.
 */
u16 mt76_calculate_default_rate(struct mt76_phy *phy,
				struct ieee80211_vif *vif, int rateidx)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	/* prefer the channel context definition when the vif has one */
	struct cfg80211_chan_def *chandef = mvif->ctx ?
					    &mvif->ctx->def :
					    &phy->chandef;
	int offset = 0;

	/* skip the CCK entries at the start of mt76_rates on 5/6 GHz */
	if (chandef->chan->band != NL80211_BAND_2GHZ)
		offset = 4;

	/* pick the lowest rate for hidden nodes */
	if (rateidx < 0)
		rateidx = 0;

	rateidx += offset;
	/* out-of-range indices fall back to the band's lowest rate */
	if (rateidx >= ARRAY_SIZE(mt76_rates))
		rateidx = offset;

	return mt76_rates[rateidx].hw_value;
}
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1834
/* Accumulate one station's tx stats into the ethtool data array. The
 * write order of the data[ei++] sequence must match the stat-name order
 * exposed by the calling driver's ethtool strings.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	/* EHT-capable devices report three extra tx modes */
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* non-EHT devices report one bandwidth bucket fewer */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 14 MCS entries with EHT, 12 otherwise */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	/* number of stats this worker contributed, for the caller */
	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1870
/* Report aggregated page pool statistics for all rx queues via ethtool.
 * Compiles to a no-op when CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	/* sum the per-queue pool stats into a single report */
	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1885
mt76_phy_dfs_state(struct mt76_phy * phy)1886 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1887 {
1888 struct ieee80211_hw *hw = phy->hw;
1889 struct mt76_dev *dev = phy->dev;
1890
1891 if (dev->region == NL80211_DFS_UNSET ||
1892 test_bit(MT76_SCANNING, &phy->state))
1893 return MT_DFS_STATE_DISABLED;
1894
1895 if (!hw->conf.radar_enabled) {
1896 if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1897 (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1898 return MT_DFS_STATE_ACTIVE;
1899
1900 return MT_DFS_STATE_DISABLED;
1901 }
1902
1903 if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1904 return MT_DFS_STATE_CAC;
1905
1906 return MT_DFS_STATE_ACTIVE;
1907 }
1908 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1909