// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"

/*
 * Channel table entry templates, one per band.  hw_value carries the
 * IEEE channel number used by hardware/firmware; max_power is a fixed
 * 30 dBm cap later trimmed by the regulatory domain.
 */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN6G(_idx, _freq) { \
	.band = NL80211_BAND_6GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

/* 2.4 GHz channels 1-14 (14 at 2484 MHz is Japan-only). */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

/* 5 GHz channels, grouped UNII-1 / UNII-2(+ext, DFS) / UNII-3. */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};

/* 6 GHz channels, grouped by UNII block. */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49,
6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};

/* LED blink interval vs throughput: faster blinking as traffic grows. */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};

/*
 * Legacy bitrate table: 4 CCK rates followed by 8 OFDM rates (rate
 * values in units of 100 kbps).  2 GHz uses all 12 entries; the 5/6 GHz
 * callers below pass "rates + 4" to skip the CCK part.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);

/* Frequency ranges that SAR power limits can be applied to. */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
};

/* Advertised via wiphy->sar_capa in mt76_phy_init(). */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};

/*
 * Register an LED class device with a throughput-based blink trigger.
 * Pin and polarity may be overridden via the "led" OF child node
 * ("led-sources" / "led-active-low").  Returns 0 when the driver
 * provides no LED callbacks at all.
 */
static int mt76_led_init(struct mt76_dev *dev)
{
	struct device_node *np = dev->dev->of_node;
	struct ieee80211_hw *hw = dev->hw;
	int led_pin;

	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return 0;

	snprintf(dev->led_name, sizeof(dev->led_name),
		 "mt76-%s", wiphy_name(hw->wiphy));

	dev->led_cdev.name = dev->led_name;
	dev->led_cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
						 IEEE80211_TPT_LEDTRIG_FL_RADIO,
						 mt76_tpt_blink,
						 ARRAY_SIZE(mt76_tpt_blink));

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
	}

	return led_classdev_register(dev->dev, &dev->led_cdev);
}

/* Counterpart of mt76_led_init(); same no-callback early-out. */
static void mt76_led_cleanup(struct mt76_dev *dev)
{
	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return;

	led_classdev_unregister(&dev->led_cdev);
}

/*
 * Derive HT (and optionally VHT) stream capabilities - TX STBC and the
 * RX MCS maps - from the number of bits set in phy->antenna_mask.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |=
IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one full rx_mask byte (MCS 0-7) per available spatial stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	/* MCS 0-9 on the first nstream streams, the rest marked unsupported */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}

/*
 * Refresh stream capabilities on every enabled band, e.g. after the
 * antenna mask changed.  2 GHz never gets VHT caps.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

/*
 * Populate one supported band: duplicate the const channel template into
 * devm-managed memory (so channel flags can be modified later), allocate
 * the per-channel state array and fill in HT/VHT capabilities.
 * Returns 0 or -ENOMEM.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

/* 2.4 GHz band: full legacy rate table, HT but never VHT. */
static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;

	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz), rates,
			       n_rates, true, false);
}

/* 5 GHz band: OFDM rates only (callers pass rates + 4), optional VHT. */
static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;

	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz), rates,
			       n_rates, true, vht);
}

/* 6 GHz band: no legacy HT/VHT capabilities are advertised. */
static int
mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;

	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
			       ARRAY_SIZE(mt76_channels_6ghz), rates,
			       n_rates, false, false);
}

/*
 * After wiphy_read_of_freq_limits() has potentially disabled channels,
 * drop a band that has no usable channel left; otherwise point the
 * default chandef and chan_state at the band's first channel.
 */
static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int
i;

	/* NOTE(review): sband is &msband->sband and can never be NULL;
	 * this check looks vestigial — confirm before removing.
	 */
	if (!sband)
		return;

	/* look for at least one channel the regdomain left enabled */
	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		phy->chandef.chan = &sband->channels[0];
		phy->chan_state = &msband->chan[0];
		return;
	}

	/* no usable channel: hide the whole band from mac80211 */
	sband->n_channels = 0;
	phy->hw->wiphy->bands[band] = NULL;
}

/*
 * Common hw/wiphy setup shared by the main phy (mt76_register_device)
 * and extra phys (mt76_register_phy).  Allocates the per-range SAR
 * power table; returns 0 or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* one mt76_freq_range_power slot per advertised SAR range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw,
AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}

/*
 * Allocate an ieee80211_hw carrying a secondary mt76_phy.  The driver
 * private area (@size bytes) follows the 8-byte-aligned phy inside
 * hw->priv.  Returns NULL on allocation failure.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);

/*
 * Set up bands for a secondary phy and register it with mac80211.
 * On success the phy is published as dev->phy2.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* + 4: skip the CCK entries of the shared rate table */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phy2 = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);

/* Tear down a secondary phy; flushes tx status before unregistering. */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phy2 = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);

/*
 * Allocate the ieee80211_hw plus mt76_dev (with the main phy embedded)
 * and initialize all common state: locks, queues, the tx worker hook
 * and the ordered workqueue.  Returns NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;

	/* the main phy lives inside mt76_dev itself */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	INIT_LIST_HEAD(&dev->wcid_list);

	INIT_LIST_HEAD(&dev->txwi_cache);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);

/*
 * Register the main phy: bands, optional LED, the mac80211 hw and the
 * dedicated tx worker thread (low FIFO priority).
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

dev_set_drvdata(dev->dev, dev);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* + 4: skip the CCK entries of the shared rate table */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(dev);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);

/* Reverse of mt76_register_device(); flushes tx status work first. */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(dev);
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

/* Final teardown: stop the tx worker, drop the workqueue and the hw. */
void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);

/*
 * Flush the currently accumulated A-MSDU burst for queue @q to the
 * per-queue rx list, unless it looks like a spoofed single-MSDU A-MSDU
 * (see the comment below), in which case it is dropped.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a
proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* decrypted but IV not stripped: skip the 8-byte IV */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}

/*
 * Chain consecutive A-MSDU subframes onto the head skb's frag_list.
 * A burst is released when a non-A-MSDU frame, a first subframe or a
 * different sequence number arrives, and when the last subframe is seen.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}

/*
 * Driver rx entry point: queue a received frame for deferred processing
 * in mt76_rx_poll_complete().  Frames arriving while the phy is not
 * running are dropped.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy,
q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);

/* True if any tx queue of this phy still has frames queued. */
bool mt76_has_tx_pending(struct mt76_phy *phy)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		q = phy->q_tx[i];
		if (q && q->queued)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);

/*
 * Map a channel to its per-phy state entry via its index in the band's
 * channel array (chan array is allocated 1:1 in mt76_init_sband()).
 */
static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &phy->sband_2g;
	else if (c->band == NL80211_BAND_6GHZ)
		msband = &phy->sband_6g;
	else
		msband = &phy->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

/* Credit time since the last survey snapshot as active channel time. */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;

	state->cc_active += ktime_to_us(ktime_sub(time,
						  phy->survey_time));
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);

/*
 * Pull fresh counters from the driver and fold the software-accumulated
 * BSS rx airtime into the channel state (under cc_lock).
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);

/*
 * Switch the phy's bookkeeping to the channel in hw->conf: wait (up to
 * 200 ms) for pending tx to drain, close out the survey period, reset
 * DFS state on a real channel/width change and reset channel stats when
 * going off-channel.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait,
!mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* off-channel visit: don't let it pollute the main chan stats */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);

/*
 * cfg80211 survey dump callback.  Channels are indexed in 2g, 5g, 6g
 * order; counters are converted from microseconds to milliseconds.
 * Returns -ENOENT past the last channel.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* refresh counters once per dump (idx 0 is the first call) */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx =
div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated under cc_lock elsewhere */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

/*
 * Seed the software PN replay counters of @wcid from the key's current
 * rx sequence numbers.  Software PN checking is enabled for CCMP keys
 * only; any other (or no) key disables it.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame (i == IEEE80211_NUM_TIDS after the loop,
	 * i.e. the extra slot used by mt76_check_ccmp_pn() for mgmt frames)
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);

/*
 * Translate the driver's mt76_rx_status (stashed in skb->cb) into
 * mac80211's ieee80211_rx_status in place, and resolve the destination
 * hw and station for the frame.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	/* copy out first: status aliases skb->cb which gets overwritten */
	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference =
mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;

	/* scan results want a boottime stamp on beacons/probe responses */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.ext_phy);
}

/*
 * Software CCMP PN replay check against the counters seeded by
 * mt76_wcid_key_setup().  Returns 0 when the frame passes (or the check
 * does not apply) and -EINVAL for a replayed frame.
 */
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

/*
 * Account the airtime of one received frame (or aggregate) both to the
 * device-wide BSS rx counter and, when known, to the sending station.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}

/*
 * Report the accumulated length of the pending A-MPDU as one aggregate.
 * The wcid pointer is re-resolved from its index here because the
 * station may have been removed since the aggregate started.
 */
static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}

/*
 * Software rx airtime accounting (MT_DRV_SW_RX_AIRTIME drivers only):
 * frames of the same A-MPDU are summed and reported in one go when the
 * aggregate ends; standalone frames are reported immediately.  Frames
 * from unknown stations only count if addressed to the main phy.
 */
static
void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}

/*
 * Per-frame station bookkeeping on the rx path: resolve the wcid for
 * PS-Poll frames, run airtime accounting, update RSSI/inactivity and
 * drive powersave transitions for stations with software PS tracking.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->ext_phy);
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* signal is a dBm value (<= 0); the ewma stores its magnitude */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi,
-status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only unfragmented mgmt/data frames carry a trustworthy PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* set the flag before notifying the driver, clear it after -
	 * this ordering keeps the tx path consistent during transition
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}

/*
 * Final rx stage: run PN replay checks, convert the statuses and hand
 * all frames (head + A-MSDU frag_list members) to mac80211, batched
 * through a local list; with a napi context use GRO delivery.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}

/*
 * Drain the deferred rx queue filled by mt76_rx(): run per-sta checks,
 * push frames through the rx aggregation reorder buffer, then deliver.
 */
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);

/*
 * Add a station: let the driver allocate its wcid, wire the wcid into
 * the per-TID tx queues and publish it in the dev->wcid RCU table.
 * Called under dev->mutex; returns the driver's sta_add() result.
 */
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta, bool ext_phy)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid;
	}

	ewma_signal_init(&wcid->rssi);
	if (ext_phy)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->ext_phy = ext_phy;
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_packet_id_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}

/*
 * Remove a station without taking dev->mutex (callers hold it or are on
 * a teardown path): stop rx aggregation, notify the driver, flush
 * pending packet ids and release the wcid index from both masks.
 */
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_packet_id_flush(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);

/* Locked wrapper around __mt76_sta_remove(). */
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}

/* mac80211 sta_state callback: map station state transitions onto the
 * driver hooks.  NOTEXIST->NONE allocates the station (mt76_sta_add),
 * AUTH->ASSOC calls the optional drv->sta_assoc hook, and
 * NONE->NOTEXIST frees the station (mt76_sta_remove).
 *
 * Returns 0, or the error from mt76_sta_add() on the add transition.
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	/* any phy other than the primary one counts as the ext phy */
	bool ext_phy = phy != &dev->phy;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(dev, vif, sta, ext_phy);

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC &&
	    dev->drv->sta_assoc)
		dev->drv->sta_assoc(dev, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(dev, vif, sta);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);

/* mac80211 sta_pre_rcu_remove callback: unpublish the wcid pointer so
 * the rx path can no longer look this station up before mac80211 frees
 * the underlying ieee80211_sta after the RCU grace period.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);

/* mac80211 get_txpower callback: report the current tx power in dBm.
 * txpower_cur plus the per-chain delta appears to be kept in 0.5 dB
 * units, hence the rounding division by 2 — TODO confirm against the
 * txpower setup code.
 */
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm)
{
	struct mt76_phy *phy = hw->priv;
	int n_chains = hweight8(phy->antenna_mask);
	int delta = mt76_tx_power_nss_delta(n_chains);

	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);

/* Validate SAR specs from cfg80211 and cache the per-frequency-range
 * power limits in phy->frp[] for use by mt76_get_sar_power().
 *
 * Returns 0 on success, -EINVAL for a non-power SAR type or empty spec
 * list.
 */
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy =
hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies power limitation in 0.25 dBm units;
		 * >> 1 converts to the driver's 0.5 dB granularity */
		s32 power = sar->sub_specs[i].power >> 1;

		/* out-of-range values are saturated to the maximum */
		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);

/* Clamp @power on channel @chan by the cached SAR limit of the matching
 * frequency range (if any).  Returns the possibly reduced power; when no
 * SAR data is configured, @power is returned unchanged.
 */
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power)
{
	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
	int freq, i;

	if (!capa || !phy->frp)
		return power;

	if (power > 127 || power < -127)
		power = 127;

	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0 ; i < capa->num_freq_ranges; i++) {
		if (phy->frp[i].range &&
		    freq >= phy->frp[i].range->start_freq &&
		    freq < phy->frp[i].range->end_freq) {
			power = min_t(int, phy->frp[i].power, power);
			break;
		}
	}

	return power;
}
EXPORT_SYMBOL_GPL(mt76_get_sar_power);

/* Per-interface iterator: finish CSA on a vif whose beacon countdown
 * has completed. */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_csa_finish(vif);
}

/* If a CSA countdown was flagged complete (by mt76_csa_check()), notify
 * mac80211 for every active interface and clear the flag. */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);

/* Per-interface iterator: record in dev->csa_complete whether any vif's
 * beacon countdown has finished. */
static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}

/* Poll all active interfaces for a completed CSA beacon countdown; the
 * result accumulates in dev->csa_complete for mt76_csa_finish(). */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);

/* mac80211 set_tim callback: intentional no-op stub shared by drivers;
 * always reports success. */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);

/* Re-insert the 8-byte CCMP IV (stripped by hardware) between the
 * 802.11 header and the payload, rebuilding it from the PN saved in
 * status->iv and @key_id, then clear RX_FLAG_IV_STRIPPED so mac80211
 * treats the IV as present.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* make room for the IV and shift the 802.11 header back down */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0, PN1, rsvd, key/ExtIV, PN2..PN5 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* ExtIV bit + key id */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);

/* Translate a hardware rate index @idx into the matching index of
 * @sband->bitrates.  CCK rates only exist on the 2 GHz band (where the
 * first 4 entries are CCK); for OFDM lookups on 2 GHz the search starts
 * past them.  Returns 0 when no entry matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int i, offset = 0, len = sband->n_bitrates;

	if (cck) {
		if (sband != &dev->phy.sband_2g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->phy.sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);

/* mac80211 sw_scan_start callback: flag the phy as scanning. */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);

/* mac80211 sw_scan_complete callback: clear the scanning flag. */
void mt76_sw_scan_complete(struct
ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);

/* mac80211 get_antenna callback: report the configured antenna mask for
 * both tx and rx (the driver keeps a single mask for both directions).
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);

/* Allocate a hardware queue structure (device-managed memory) and
 * initialize it through the bus-specific queue_ops->alloc hook.
 *
 * Returns the queue or an ERR_PTR on allocation/init failure.
 */
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);

/* Pick a default rate from the global mt76_rates[] table: off the 2 GHz
 * band the first 4 (CCK) entries are skipped.  A negative @rateidx
 * selects the lowest rate; an out-of-range index wraps back to the
 * band's lowest entry.  Returns the rate's hw_value.
 */
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
{
	int offset = 0;

	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
		offset = 4;

	/* pick the lowest rate for hidden nodes */
	if (rateidx < 0)
		rateidx = 0;

	rateidx += offset;
	if (rateidx >= ARRAY_SIZE(mt76_rates))
		rateidx = offset;

	return mt76_rates[rateidx].hw_value;
}
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);

/* Accumulate one station's tx statistics into the ethtool data array,
 * starting at wi->initial_stat_idx: per-PHY-mode counters, then per-
 * bandwidth, then per-MCS.  Also bumps wi->sta_count and records how
 * many slots were written in wi->worker_stat_count.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];

	for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
		data[ei++] += stats->tx_bw[i];

	/* 12 MCS slots — presumably MCS 0..11 (HE); TODO confirm against
	 * the tx_mcs accounting in the rate handling code */
	for (i = 0; i < 12; i++)
		data[ei++] += stats->tx_mcs[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);

/* Derive the DFS state of the current channel:
 * - DISABLED when no DFS region is set or a scan is in progress;
 * - without radar detection enabled, ACTIVE only for monitor mode on a
 *   radar channel, otherwise DISABLED;
 * - with radar detection enabled, CAC until the channel is marked
 *   available, then ACTIVE.
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!hw->conf.radar_enabled) {
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	if (phy->chandef.chan->dfs_state != NL80211_DFS_AVAILABLE)
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);