// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"

#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN6G(_idx, _freq) { \
	.band = NL80211_BAND_6GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};

static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};

static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};

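/* Legacy rate table shared by the mt76 drivers: four CCK rates followed by
 * eight OFDM rates, with bitrate values in units of 100 kbit/s.
 */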
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);

static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
};

const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};

static int mt76_led_init(struct mt76_dev *dev)
{
	struct device_node *np = dev->dev->of_node;
	struct ieee80211_hw *hw = dev->hw;
	int led_pin;

	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return 0;

	snprintf(dev->led_name, sizeof(dev->led_name),
		 "mt76-%s", wiphy_name(hw->wiphy));

	dev->led_cdev.name = dev->led_name;
	dev->led_cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
						 IEEE80211_TPT_LEDTRIG_FL_RADIO,
						 mt76_tpt_blink,
						 ARRAY_SIZE(mt76_tpt_blink));

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
	}

	return led_classdev_register(dev->dev, &dev->led_cdev);
}

static void mt76_led_cleanup(struct mt76_dev *dev)
{
	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return;

	led_classdev_unregister(&dev->led_cdev);
}

static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}

void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;

	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz), rates,
			       n_rates, true, false);
}

static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;

	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz), rates,
			       n_rates, true, vht);
}

static int
mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;

	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
			       ARRAY_SIZE(mt76_channels_6ghz), rates,
			       n_rates, false, false);
}

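/* Drop a band from the wiphy entirely if all of its channels ended up
 * disabled (e.g. by devicetree frequency limits read at registration time).
 */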
static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		phy->chandef.chan = &sband->channels[0];
		phy->chan_state = &msband->chan[0];
		return;
	}

	sband->n_channels = 0;
	phy->hw->wiphy->bands[band] = NULL;
}

static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}

struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);

int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phy2 = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);

void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phy2 = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);

struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;

	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	INIT_LIST_HEAD(&dev->wcid_list);

	INIT_LIST_HEAD(&dev->txwi_cache);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);

int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(dev);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

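	/* Spawn the per-device TX worker and run it at low SCHED_FIFO
	 * priority.
	 */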
	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);

void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(dev);
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);

static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate that the A-MSDU has a proper first subframe.
	 * A single MSDU can be parsed as an A-MSDU when the unauthenticated
	 * A-MSDU flag of the QoS header gets flipped. In such cases, the
	 * first subframe has a LLC/SNAP header in the location of the
	 * destination address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}

static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);

bool mt76_has_tx_pending(struct mt76_phy *phy)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		q = phy->q_tx[i];
		if (q && q->queued)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);

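/* Look up the per-channel survey state that matches a cfg80211 channel */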
static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &phy->sband_2g;
	else if (c->band == NL80211_BAND_6GHZ)
		msband = &phy->sband_6g;
	else
		msband = &phy->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;

	state->cc_active += ktime_to_us(ktime_sub(time,
						  phy->survey_time));
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);

void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);

void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);

int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

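	/* cc_bss_rx and cc_tx are also updated elsewhere in the driver under
	 * cc_lock, so snapshot them with the lock held.
	 */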
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
EXPORT_SYMBOL(mt76_wcid_key_setup);

static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;

	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.ext_phy);
}

static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211.
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}

static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}

static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}

static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->ext_phy);
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}

void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}

void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

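	/* Hand the reordered frames to mac80211, using GRO when running in
	 * NAPI context.
	 */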
	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);

static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta, bool ext_phy)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid;
	}

	ewma_signal_init(&wcid->rssi);
	if (ext_phy)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->ext_phy = ext_phy;
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_packet_id_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}

void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_packet_id_flush(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);

static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(dev, vif, sta, ext_phy);

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC &&
	    dev->drv->sta_assoc)
		dev->drv->sta_assoc(dev, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(dev, vif, sta);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);

void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm)
{
	struct mt76_phy *phy = hw->priv;
	int n_chains = hweight8(phy->antenna_mask);
	int delta = mt76_tx_power_nss_delta(n_chains);

	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);

int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limitation in units of 0.25 dBm */
		s32 power = sar->sub_specs[i].power >> 1;

		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);

int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power)
{
	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
	int freq, i;

	if (!capa || !phy->frp)
		return power;

	if (power > 127 || power < -127)
		power = 127;

	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0; i < capa->num_freq_ranges; i++) {
		if (phy->frp[i].range &&
		    freq >= phy->frp[i].range->start_freq &&
		    freq < phy->frp[i].range->end_freq) {
			power = min_t(int, phy->frp[i].power, power);
			break;
		}
	}

	return power;
}
EXPORT_SYMBOL_GPL(mt76_get_sar_power);

static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_csa_finish(vif);
}

void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);

static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}

void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);

int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);

void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);

int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int i, offset = 0, len = sband->n_bitrates;

	if (cck) {
		if (sband != &dev->phy.sband_2g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->phy.sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);

void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);

void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);

u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
{
	int offset = 0;

	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
		offset = 4;

	/* pick the lowest rate for hidden nodes */
	if (rateidx < 0)
		rateidx = 0;

	rateidx += offset;
	if (rateidx >= ARRAY_SIZE(mt76_rates))
		rateidx = offset;

	return mt76_rates[rateidx].hw_value;
}
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);

void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];

	for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
		data[ei++] += stats->tx_bw[i];

	for (i = 0; i < 12; i++)
		data[ei++] += stats->tx_mcs[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);